doc_500
jQuery("li.social").hover(function() { jQuery(this).find("img").stop(true, true).animate({ 'marginTop': "-=20px" }, 'fast'); }, function() { jQuery(this).find("img").stop(true, true).animate({ 'marginTop': "+=20px" }, 'fast'); }) jQuery("li.reservas").hover(function() { jQuery(this).find("img").stop(true, true).fadeOut({ 'marginTop': "-=30px" }, 'slow'); }, function() { jQuery(this).find("img").stop(true, true).fadeIn({ 'marginTop': "+=30px" }, 'slow'); }) jQuery("ul.menu li").hover(function() { jQuery(this).find("a").stop(true, true).animate({ 'borderBottomColor': '#2E9ECE', 'color': '2E9ECE' }, 'slow'); }, function() { jQuery(this).find("a").stop(true, true).animate({ 'borderBottomColor': '#FFDF85', 'color': 'FFDF85' }, 'slow'); })​ A: By looking at your code I can tell that you've forgotten # near css colors, so instead of this 'color': '2E9ECE' use this 'color': '#2E9ECE'. You may also want to work on your style, I have rewritten your last hover to something like this: $('ul.menu li a').hover( function() { // do this on hover $(this).animate({ 'borderBottomColor': '#2E9ECE', 'color': '#2E9ECE' }, 'slow'); }, function() { // do this on hover out $(this).animate({ 'borderBottomColor': '#FFDF85', 'color': '#FEFEFE' }, 'slow'); } ); which, in my opinion, is more readable and shorter. Take a look at jQuery API hover and animate UPDATE: I've verified, this code works (tested with newest versions of FireFox and Chrome): <html> <head> <script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js"></script> <script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.18/jquery-ui.min.js"></script> <script type="text/javascript"> $(function() { $("a").hover( function() { $(this).animate({ color: "#00ff00" }, 'slow'); },function() { $(this).animate({ color: "#ff0000" }, 'slow'); }); }); </script> </head> <body> <a href="#">aaa</a><br /> <a href="#">bbb</a><br /> </body> </html>
doc_501
A: Just add a listener to the TextEditingController. something like below. @override void initState() { super.initState(); _editingController.addListener(() { print(_editingController.text); }); } @override void dispose() { // Clean up the controller when the Widget is disposed _editingController.dispose(); super.dispose(); } Hope it helps! A: use the onChange(text) function of the TextFormField with a ValueNotifier, this should help you out. TextFormField( controller: _Controller, label: "Input", currentNode: _Node, nextNode: FocusNode(), keyboard: TextInputType.visiblePassword, onChanged: (text) { _avalueNotifier.value = text; }, inputFormatters: <TextInputFormatter>[ BlacklistingTextInputFormatter.singleLineFormatter, ], validator: (String value) { if (value.isEmpty) { return 'Please make inputs'; } return null; }, ),
doc_502
I have designed my application using MVC architecture. Here is my Code: title : 'Trade Data', store : 'RamDataStore', id:'tradedatagrid', dockedItems:[{ xtype:'pagingtoolbar', store:'TradeDataStore', displayInfo:true, id:'tradepage', itemId:'tradepage', displayMsg:'{0}-{1} of {2}', emptyMsg:'no topics to show', dock:'bottom'} ], columns : [ { xtype : 'gridcolumn', width : 85,align : 'center', dataIndex : 'tradeid', text : 'TradeId' }, { xtype : 'gridcolumn', width : 120, dataIndex : 'instrumentType', text : 'InstrumentType' }, { xtype : 'gridcolumn', width : 103, align : 'center', dataIndex : 'tradeBook', text : 'TradingBook' }, { xtype : 'gridcolumn', width : 120, align : 'center', dataIndex : 'otherBook', text : 'CustomerBook' }, ] Here my paging tool bar store and my grid store are the same. Store: I defined my store with some default properties and I created an instance for the same store in the controller to dynamically bind. Ext.define('Myapp.store.RamDataStore', { extend: 'Ext.data.Store', requires: ['MyApp.model.ram.RamDataModel'], constructor: function(cfg) { var me = this; cfg = cfg || {}; me.callParent([Ext.apply({ storeId: 'tradedata', autoLoad: false, pageSize: 4, model: 'MyApp.model.ram.RamDataModel', proxy:{ writer:{ type:'json' }, reader:{ type:'json' }, enablePaging: true }, sorters: [{ property: 'tradeid', direction: 'ASC' }] }, cfg)]); } }); Model: Ext.define('MyApp.model.ram.RamDataModel', { extend : 'Ext.data.Model', fields : [{ name:'tradeid', type:'int' }, { name : 'tradeBook', type : 'string' }, { name : 'otherBook', type : 'string' }, { name : 'tradeDate', type : 'auto' }, { name : 'tradedDate', type : 'auto' }}); Controller: I wrote a function that will call on button clicks, and I got a JSON result from the server: data = [{"tradeid":1,"tradingbook":"ram"},{"tradeid:2,"tradingbook":"testbook"}] //(etc) Here is my controller code: var datastore = Ext.create('MyApp.store.RamDataStore',{ model:'Myapp.model.ram.RamDataModel', data:Ext.decode(result,true), pageSize:4, start:0, limit:4, enablePaging : true, proxy:{ type:'memory', reader:{type:'json'}, writer:{type:'json'}, }, listeners:{ beforeload:function(store,operation,eOpts){ store.proxy.data.total=Ext.decode(result,true).length; //store.proxy.data=Ext.decode(result,true); } }, }); Ext.getCmp('tradedatagrid').getDockedComponent('tradepage').bind(datastore); Ext.getCmp('tradedatagrid').getView().bindStore(datastore); Ext.getCmp('tradedatagrid').getView().loadMask.hide(); } }); With this code, I can add data to my grid, but can't add store to my paging tool bar. Please help on this. If you have any examples, please suggest & I will follow. Thanks. A: You specify the store for paging toolbar as string what means that Store Manager assumes the string is storeId and tries to find the instance of it. But it cannot because the store is probably created later. Also, the store must be same for both the grid and paging toolbar. You have two options: * *declare the store in your controller: stores:['RamDataStore'] *create it manually during grid initComponent where you would also create the paging toolbar and assign the store to it.
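A rough sketch of that second option (the class name, alias, and layout below are assumptions, not taken from your app): create the store inside the grid's initComponent and hand the same instance to both the grid and its paging toolbar.

Ext.define('MyApp.view.TradeDataGrid', {
    extend: 'Ext.grid.Panel',
    alias: 'widget.tradedatagrid',
    initComponent: function () {
        // One store instance, shared by the grid and the paging toolbar
        var store = Ext.create('MyApp.store.RamDataStore');
        this.store = store;
        this.dockedItems = [{
            xtype: 'pagingtoolbar',
            dock: 'bottom',
            displayInfo: true,
            store: store
        }];
        this.callParent(arguments);
    }
});

Because both components hold the same store instance, loading data into it (or binding a newly created store to the grid) keeps the paging toolbar in sync without a separate bind call.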
doc_503
Until now it was all good, except that I cannot get the user's email. This method gives me the user name, id, and Google Plus account, but it does not bring me the email account. I looked on Google and cannot find a solution; someone help me, please. Yii::$app->user->identity A: I answer myself, because I found the solution and put it here in case it is worth it for someone else: * *Edit the file GoogleOAuth2Service.php *Change the 'scope' in the next line: protected $scopes = array(self::SCOPE_USERINFO_PROFILE); *To this instead: protected $scopes = array(self::SCOPE_USERINFO_EMAIL); *And finally display the email attribute. $this->attributes['email'] = $info['email'];
doc_504
<!DOCTYPE html> <html> <head> <script> function showHint() { var xmlhttp = new XMLHttpRequest(); xmlhttp.onreadystatechange = function() { if (this.readyState == 4 && this.status == 200) { document.getElementById("txtHint").innerHTML = this.responseText; } } xmlhttp.open("GET", "AjaxTest1.html", true); xmlhttp.send(); } </script> </head> <body> <button style="width:50px;height:50px;" onclick="showHint()">Click</button> <div id="txtHint"> </div> </body> </html> The requested AjaxTest1.html page code is, <!DOCTYPE html> <html> <style> input[type=checkbox] { transform: scale(1.5); } </style> <body id="IGG" onload="myFunction()" style="background-color:#AFF5EA"> <script> function myFunction(){ // submisson button var del=document.createElement("Input"); del.type="submit"; del.value="Delete"; del.style="margin-left:40%;margin-top:10%;"; document.getElementById("IGG").appendChild(del); } </script> <h2>Hello this is fine</h2> </body> </html> Output when i click button to laod AjaxTest1.html whereas i need the js and css to also come along with html in responseText.
doc_505
The initial script is to git clone the repos: const shell = require('shelljs') const path = '/home/test/' shell.cd(path) shell.exec('git clone https://github.com/test/mic.git') This is a JavaScript file and it does clone the repo: running node git.js simply clones the repos. Now I have another script which needs to get the result of the above script and pass it to a variable which then says if it is a success or failure. var body = response.getResponseBody(); var res = JSON.parse(body); if (res.flag < 1){ api.setValue("flag", "failed"); } Is there a way to integrate these scripts to get the right results? All I want is to know whether the first script succeeded or failed, and to get that status as a result which can be passed as a flag to another variable. Any direction is really helpful. A: shell.exec takes a callback. If you have an error, the code parameter in the callback will be non-zero. shell.exec('git clone https://github.com/test/mic.git', (code, stdout, stderr) => { if(code === 0) { // No Error } else { // Had an error } })
doc_506
It is similar to the problem we have with registration: A straightforward implementation gives something like public static void doRegistration(User user) { //... } The user parameter is a JPA Entity. The User model looks something like this: @Entity public class User extends Model { //some other irrelevant fields @OneToMany(cascade = CascadeType.ALL) public Collection<Query> queries; @OneToMany(cascade = CascadeType.ALL) public Collection<Activity> activities; //... I have read here and there that this fails. Now, in Play!, what is the best course of action we can take? There must be some way to put all that data that has to go to the server in one object, that easily can be saved into the database. EDIT: The reason this fails is because of the validation fail. It somehow says "incorrect value" when validating collection objects. I was wondering if this can be avoided. SOLUTION: Changing the Collection to List has resolved the issue. This is a bug that will be fixed in play 1.2 :) Thanks beforehand A: this works. To be more clear, you can define a controller method like the one you wrote: public static void doRegistration(User user) { //... } That's completely fine. Just map it to a POST route and use a #{form} to submit the object, like: #{form id:'myId', action:@Application.doRegistration()} #{field user.id} [...] #{/form} This works. You may have problems if you don't add all the field of the entity in the form (if some fields are not editable, either use hidden inputs or a NoBinding annotation as described here). EDIT: on the OneToMany subject, the relation will be managed by the "Many" side. That side has to keep the id of the related entity as a hidden field (with a value of object.user.id). This will solve all related issues. A: It doesn't fail. If you have a running transaction, the whole thing will be persisted. Just note that transactions are usually running within services, not controllers, so you should pass it from the controller to the service.
doc_507
Table 1 ID DATA 1 'A' 2 'B' 3 'C' 4 'D' 5 'E' 6 'F' Table 2 ID DATA 2 'G' 3 'F' 4 'Q' How do I insert data into Table 1 from Table 2 where Table 2 doesn't have Table 1's ID? In other words, I'd like the following result: Table 2 ID DATA 1 'A' 2 'G' 3 'F' 4 'Q' 5 'E' 6 'F' A: The wording in your question a little bit confusing because you first ask How do I insert data into Table 1 from Table 2 but then you're showing the desired result for Table2. Now if you want to insert rows from table1 into table2 with ids that doesn't exist in table2 you can do it this way INSERT INTO Table2 (id, data) SELECT id, data FROM Table1 t WHERE NOT EXISTS ( SELECT * FROM Table2 WHERE id = t.id ) Here is SQLFiddle demo or INSERT INTO Table2 (id, data) SELECT t1.id, t1.data FROM Table1 t1 LEFT JOIN Table2 t2 ON t1.id = t2.id WHERE t2.id IS NULL; Here is SQLFiddle demo Outcome (in both cases): | ID | DATA | |----|------| | 1 | A | | 2 | G | | 3 | F | | 4 | Q | | 5 | E | | 6 | F | A: INSERT INTO Table2 (ID, DATA) SELECT a.ID, a.DATA FROM Table1 a JOIN ( SELECT ID FROM Table1 EXCEPT SELECT ID FROM Table2 ) b ON b.ID = a.ID ; Here some code you can run in SSMS that will exemplify this code by: * *Setting up temp tables *Filling the temp tables with data (taken from peterm's SQL Fiddle however heavily modified) *Executing the above *Then cleaning up after itself by destroying the temp tables . PRINT 'Dropping and creating temp tables'; IF OBJECT_ID('tempdb.dbo.#t1') IS NOT NULL DROP TABLE #t1; IF OBJECT_ID('tempdb.dbo.#t2') IS NOT NULL DROP TABLE #t2; CREATE TABLE #t1 ( ID INT, DATA VARCHAR(1) ); CREATE TABLE #t2 ( ID INT, DATA VARCHAR(1)) ; PRINT 'Temp tables created'; PRINT 'Inserting test data into temp tables'; INSERT INTO #t1 ( ID, DATA ) VALUES (1, 'A') ,(2, 'B') ,(3, 'C') ,(4, 'D') ,(5, 'E') ,(6, 'F') ; INSERT INTO #t2 ( ID, DATA ) VALUES (2, 'G') ,(3, 'F') ,(4, 'Q') ; PRINT 'Test data inserted'; PRINT 'SELECTING temp tables before modifying data' SELECT * FROM #t1; SELECT * FROM #t2; PRINT 'Modifying data' INSERT INTO #t2 (ID, DATA) SELECT a.ID, a.DATA FROM #t1 a JOIN ( SELECT ID FROM #t1 EXCEPT SELECT ID FROM #t2 ) b ON b.ID = a.ID ; PRINT 'SELECTING "after" modifying data' SELECT * FROM #t1 SELECT * FROM #t2 PRINT 'Cleaning up (destroying temp tables)' IF OBJECT_ID('tempdb.dbo.#t1') IS NOT NULL DROP TABLE #t1; IF OBJECT_ID('tempdb.dbo.#t2') IS NOT NULL DROP TABLE #t2;
doc_508
//example А class Game{ public: Game(int inWidth, int inHeight, char const * Intitle); }; Game::Game(int inWidth, int inHeight, char const * Intitle){ gameWindow gamewindowObj=gameWindow(inWidth, inHeight, Intitle); } //example В class Game{ public: Game(int inWidth, int inHeight, char const * Intitle); private: gameWindow gamewindowObj=gameWindow(inWidth, inHeight, Intitle); }; Game::Game(int inWidth, int inHeight, char const * Intitle){} A: If you want gamewindowObj to be a data member and be initialized by the constructor's arguments, you can use member initializer list, e.g. class Game{ public: Game(int inWidth, int inHeight, char const * Intitle); private: gameWindow gamewindowObj; }; Game::Game(int inWidth, int inHeight, char const * Intitle) : gamewindowObj(inWidth, inHeight, Intitle) { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ }
doc_509
I am using this formula in VBA but it is not working. Syntax looks fine. A: Too many ")" .Range("BS2:BS" & NewLastRow).Formula = "=IF(OR(BR2=""FLAG"",BO2>0),""FLAG"",""NOFLAG"" )" A: In general, try the following: * *Make a workable formula in Excel *Then select the cell with the workable formula *Run the following code Public Sub PrintMeUsefulFormula() Dim strFormula As String Dim strParenth As String strParenth = """" strFormula = Selection.Formula strFormula = Replace(strFormula, """", """""") strFormula = strParenth & strFormula & strParenth Debug.Print strFormula End Sub * *In the immediate window something useful should be printed. Source: Apply formula in VBA?
doc_510
This is my code: public T GetById(object id) { T entity; entity = (T)ReportHttpModule.CurrentSession.Get(typeof(T), id); return entity; } Note that I don't get this error when I use CreateCriteria(T) before calling that method! A: I've found the solution. It is weird, but it was a problem in the mapping file. One of my columns had not-null="true" in the mapping file but was not nullable in the database!
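For illustration only (the property and column names are invented), this is the kind of hbm.xml mapping attribute to double-check against the actual column definition in the database:

<property name="SomeDate" column="SomeDate" not-null="true" />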
doc_511
A: The "set EditMode property of the DataGridView to EditOnEnter" worked for me, but I found another problem: user can't delete a row by just selecting and pressing DEL key. So, a google search gave me another way to do it. Just catch the event CellEnter and check if the cell is the appropriated type to perform appropriated action like this sample code: private void Form_OnLoad(object sender, EventArgs e){ dgvArmazem.CellEnter += new DataGridViewCellEventHandler(dgvArmazem_CellEnter); } void dgvArmazem_CellEnter(object sender, DataGridViewCellEventArgs e) { DataGridView dg = (DataGridView)sender; if (dg.CurrentCell.EditType == typeof(DataGridViewComboBoxEditingControl)) { SendKeys.Send("{F4}"); } } Now the ComboBox drops down faster and the user still delete a row by selecting a row and pressing DEL key. That's it. A: Set EditMode property of the DataGridView to EditOnEnter: link DataGridView.EditMode - Gets or sets a value indicating how to begin editing a cell. EditOnEnter - Editing begins when the cell receives focus. A: You can achieve this by subscribing for the EditingControlShowing event of the grid and there for control of type ComboBox ComboBox ctl = e.Control as ComboBox; ctl.Enter -= new EventHandler(ctl_Enter); ctl.Enter += new EventHandler(ctl_Enter); And in the Enter event, use the property void ctl_Enter(object sender, EventArgs e) { (sender as ComboBox).DroppedDown = true; } DroppedDown indicates as the name suggests whether the dropdown area is shown or not, so whenever the control is entered this will set it to true and display the items without the need of further clicks.
doc_512
UPDATE: after checking for errors I receive the following in the log: Domain=com.apple.LocalAuthentication Code=-4 "Canceled by another authentication." Code: @interface LoginViewController () @property (nonatomic, strong) LAContext *context; @end @implementation LoginViewController - (LAContext *)context { if (_context == nil) { _context = [LAContext new]; } return _context; } - (void)viewDidLoad { [super viewDidLoad]; NSError *error; BOOL canAuthentication = [self.context canEvaluatePolicy:LAPolicyDeviceOwnerAuthentication error:&error]; if (canAuthentication) { [self.context evaluatePolicy:LAPolicyDeviceOwnerAuthentication localizedReason:@"FaceID" reply:^(BOOL success, NSError * _Nullable error) { if (success) { UIAlertController *alearC = [UIAlertController alertControllerWithTitle:@"Success" message:nil preferredStyle:UIAlertControllerStyleAlert]; [alearC addAction:[UIAlertAction actionWithTitle:@"OK" style:UIAlertActionStyleDefault handler:^(UIAlertAction * _Nonnull action) { }]]; [self presentViewController:alearC animated:YES completion:nil]; } else { NSLog(@"error%@",error); } }]; } } A: The reason to this is often because the fingerprint scanner is still occupied by a previous operationg . It doesnt seem as if you have used cancellationSignal.cancel or if you have it is probably not getting trigered . Each time the Scanner is done using you have to send the signal by using cancellationSignal.cancel to say that the scanner has done it's job . For example if the task fails then it would look like this , text_state.setText(context.getString(R.string.fingerFailure)); cancellationSignal.cancel(); try adding the cancellationSignal.cancel() above or below this [self presentViewController:alearC animated:YES completion:nil]; and also if needed in the else block as well . Let me know if this fixes your issue if not i will look into it
doc_513
I'm trying to use a simple get_metric_statistics script to return information about CPUUtilization for an instance. Here is the script I'm looking to use: import boto3 import datetime cw = boto3.client('cloudwatch') cw.get_metric_statistics( 300, datetime.datetime.utcnow() - datetime.timedelta(seconds=600), datetime.datetime.utcnow(), 'CPUUtilization', 'AWS/EC2', 'Average', {'InstanceId':'i-11111111111'}, ) but I keep getting the following message: Traceback (most recent call last): File "C:..../CloudWatch_GetMetricStatistics.py", line 13, in <module> {'InstanceId':'i-0c996c11414476c7c'}, File "C:\Program Files\Python27\lib\site-packages\botocore\client.py", line 251, in _api_call "%s() only accepts keyword arguments." % py_operation_name) TypeError: get_metric_statistics() only accepts keyword arguments. I have: * *Looked at the documentation on Boto3 and I believe I have got everything correctly written/included *Set the correct region/output format/security credentials in the .aws folder *Googled similar problems with put_metric_statistics, etc to try and figure it out I'm still stuck as to what I'm missing? Any guidance would be much appreciated. Many thanks Ben A: Refer to the documentation, and your error message: get_metric_statistics() only accepts keyword agruments Named arguments must be passed to the function as is defined in the docs: get_metric_statistics(**kwargs) A: This works: import boto3 import datetime cw = boto3.client('cloudwatch') cw.get_metric_statistics( Period=300, StartTime=datetime.datetime.utcnow() - datetime.timedelta(seconds=600), EndTime=datetime.datetime.utcnow(), MetricName='CPUUtilization', Namespace='AWS/EC2', Statistics=['Average'], Dimensions=[{'Name':'InstanceId', 'Value':'i-abcd1234'}] ) To find the right values, I use the AWS Command-Line Interface (CLI): aws cloudwatch list-metrics --namespace AWS/EC2 --metric-name CPUUtilization --max-items 1 It returns information such as: { "Metrics": [ { "Namespace": "AWS/EC2", "Dimensions": [ { "Name": "InstanceId", "Value": "i-abcd1234" } ], "MetricName": "CPUUtilization" } ], "NextToken": "xxx" } You can then use these values to populate your get_metric_statistics() requet (such as the Dimensions parameter). A: have you used region_name when trying to get details. Can you share your github to know better, what you are doing.
doc_514
str_extract("temp", pattern = "(?:[^-]*\\-){1}([^\\-]*)$"))) Here is the "temp" variable value: WV-Online-Reading-S1-COMBINED-ELA-3 Here is the extracted output after running this function: ELA-3 Can someone please explain to me how those special characters in "pattern = .." works? Using the same function, I would like to convert this: AIR-GEN-SUM-UD-ELA-NH-COMBINED-3-SEG1 to this: ELA-3 A good reference to those special characters would also be useful. Thanks! A: In order to find the correct regular expression you need to know what exactly you are systematically looking for in your strings. From your post I assume that you want to extract the ELA_ string and the number at the end of the strings. You could do it like this: strings <- c("WV-Online-Reading-S1-COMBINED-ELA-3", "AIR-GEN-SUM-UD-ELA-NH-COMBINED-3-SEG1") gsub(".*(ELA\\-).*(\\d$)", "\\1\\2", strings) [1] "ELA-3" "ELA-1" I will briefly explain the components of the pattern: * *.* matches zero or more arbitraty characters *ELA\\- matches 'ELA-' *\\d$ matches a digit at the end of the line The brackets form a capture group which can be "backreferenced" to by \\1 (first capture group) and \\2 (second capture group). gsub() takes the entire strings and replaces it by what it could match in both backreferences. As I do not know the exact systematic of what you are looking for the pattern might still need adjustments to fit your needs. If you are interested in the first digit only you can get it with library(stringr) strings %>% str_extract("\\d")
doc_515
For instance, I originally had $.get("info/player1.txt", function(data) { p1new = data; }); which I replaced with $.get("http://localhost:8000/info/player1.txt", function(data) { p1new = data; }); but this produces the access error. Completely stumped. This is purely html/css/jquery on a local web server.
doc_516
I see many examples of using paperclip to allow files to be attached to specific models. But these files don't have a one-to-one correspondence to any of my models. How should I model this in Rails 3? The way I would approach it, barring any input from people smarter than me, is to define "file types" for a specific model that is associated with the user account model itself. Then the upload process would place those files in a specific directory where they would be picked up by a poller that would then process the files. A: Polymorphic Paperclip? http://burm.net/2008/10/17/ruby-on-rails-polymorphic-paperclip-plugin-tutorial/
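As a rough sketch of that polymorphic approach (model names and the file_type column are made up for illustration, and the usual Paperclip document_* columns are assumed in the migration): a single attachment model owns the Paperclip file and points back at whatever record it belongs to, such as the user account.

class Upload < ActiveRecord::Base
  belongs_to :attachable, :polymorphic => true
  has_attached_file :document      # Paperclip attachment
end

class UserAccount < ActiveRecord::Base
  has_many :uploads, :as => :attachable
end

# e.g. in the upload action:
# current_account.uploads.create(:document => params[:file], :file_type => 'import')

The poller can then query Upload records by file_type and process the stored files on its own schedule.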
doc_517
I don't understand one thing, I am setting the redirect URL in flow, in the JSON, why on earth is it getting the redirect_url with random port numbers. If I change the port number in the browser URL, it works fine. But still on server it wont open the browser url, I can see it in tomcat logs, but the damn thing does not open the URL. Here are my redirect URL from Google app : http://localhost/authorizeuser http://localhost:8080/ http://localhost:8080 http://localhost http://localhost:8080/Callback https://testserver.net/Callback http://testserver.net/Callback http://127.0.0.1 Here is my client_secret.json : {"web": { "client_id": "clientid", "auth_uri": "https://accounts.google.com/o/oauth2/auth", "token_uri": "https://accounts.google.com/o/oauth2/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_email": "clientemailstuff", "client_x509_cert_url": "certurlstuff", "client_secret": "itsasecret", "redirect_uris": ["http://localhost:8080/","http://localhost:8080"], "javascript_origins": ["https://testserver.net", "http://testserver.net","http://localhost:8080"] }} And here is the code where I am trying to authenticate : @Override public Credential authorize() throws IOException { InputStream in = DriveQuickstartImpl.class.getResourceAsStream("/client_secret.json"); GoogleClientSecrets clientSecrets = GoogleClientSecrets.load(JSON_FACTORY, new InputStreamReader(in)); GoogleAuthorizationCodeFlow flow = new GoogleAuthorizationCodeFlow.Builder( HTTP_TRANSPORT, JSON_FACTORY, clientSecrets, SCOPES) .setDataStoreFactory(DATA_STORE_FACTORY) .setAccessType("offline") .build(); flow.newAuthorizationUrl().setState("xyz").setRedirectUri("http://localhost:8080/Callback"); Credential credential = new AuthorizationCodeInstalledApp( flow, new LocalServerReceiver()).authorize("user"); if(credential!=null && credential.getRefreshToken() != null){ storeCredentials(credential); } return credential; } This is majorly pissing me off as I am setting the redirect url, and it is just being ignored and why on earth a browser tab wont be opened when application is deployed on server. Update Spring problem also fixed, the below code can be used for GoogleDrive authorization on a server with tomcat or others. 
@Service @Transactional public class GoogleAuthorization{ @Autowired private DriveQuickstart driveQuickstart; private static final String APPLICATION_NAME ="APPNAME"; private static final java.io.File DATA_STORE_DIR = new java.io.File( "/home/deploy/store"); private static FileDataStoreFactory DATA_STORE_FACTORY; private static final JsonFactory JSON_FACTORY = JacksonFactory.getDefaultInstance(); private static HttpTransport HTTP_TRANSPORT; private static final List<String> SCOPES = Arrays.asList(DriveScopes.DRIVE); private static final String clientid = "clientid"; private static final String clientsecret = "clientsecret"; private static final String CALLBACK_URI = "http://localhost:8080/getgooglelogin"; private String stateToken; private final GoogleAuthorizationCodeFlow flow; public GoogleAuthorization(){ try { HTTP_TRANSPORT = GoogleNetHttpTransport.newTrustedTransport(); DATA_STORE_FACTORY = new FileDataStoreFactory(DATA_STORE_DIR); } catch (GeneralSecurityException | IOException e) { e.printStackTrace(); } flow = new GoogleAuthorizationCodeFlow.Builder(HTTP_TRANSPORT, JSON_FACTORY, clientid, clientsecret, SCOPES).setAccessType("offline").setApprovalPrompt("force").build(); generateStateToken(); } /** * Builds a login URL based on client ID, secret, callback URI, and scope */ public String buildLoginUrl() { final GoogleAuthorizationCodeRequestUrl url = flow.newAuthorizationUrl(); return url.setRedirectUri(CALLBACK_URI).setState(stateToken).build(); } /** * Generates a secure state token */ private void generateStateToken(){ SecureRandom sr1 = new SecureRandom(); stateToken = "google;"+sr1.nextInt(); } /**s * Accessor for state token */ public String getStateToken(){ return stateToken; } /** * Expects an Authentication Code, and makes an authenticated request for the user's profile information * * @param authCode authentication code provided by google */ public void saveCredentials(final String authCode) throws IOException { GoogleTokenResponse response = flow.newTokenRequest(authCode).setRedirectUri(CALLBACK_URI).execute(); Credential credential = flow.createAndStoreCredential(response, null); System.out.println(" Credential access token is "+credential.getAccessToken()); System.out.println("Credential refresh token is "+credential.getRefreshToken()); // The line below gives me a NPE. this.driveQuickstart.storeCredentials(credential); } } Controller method : @RequestMapping(value = "/getgooglelogin") public String getGoogleLogin(HttpServletRequest request, HttpServletResponse response, HttpSession session,Model model) { // Below guy should be autowired if you want to use Spring. GoogleAuthorization helper = new GoogleAuthorization(); if (request.getParameter("code") == null || request.getParameter("state") == null) { model.addAttribute("URL", helper.buildLoginUrl()); session.setAttribute("state", helper.getStateToken()); } else if (request.getParameter("code") != null && request.getParameter("state") != null && request.getParameter("state").equals(session.getAttribute("state"))) { session.removeAttribute("state"); try { helper.saveCredentials(request.getParameter("code")); return "redirect:/dashboard"; } catch (IOException e) { e.printStackTrace(); } } return "newjsp"; } newjsp just has a button to click on the URL. A: Specifically, you're getting random ports because you are using LocalServerReceiver, which starts up a jetty instance on a free port in order to receive an auth code. 
At a higher level, it looks like you are developing a web server application, but you are trying to use Google OAuth as if it were an installed application. If you are indeed making a web server application, you should be using your server's host name instead of localhost in your callback URL, providing a link for the end user to authenticate using flow.newAuthorizationUrl(), and have your callback fetch the token using flow.newTokenRequest(String). Also make sure that the Client ID you created in your console is of type Web application, or you'll get redirect_uri_mismatch errors. A full working example of how to do this can be found here. A: Instead of using: Credential credential = new AuthorizationCodeInstalledApp( flow, new LocalServerReceiver).authorize("user"); Use LocalServerReceiver localReceiver = new LocalServerReceiver. Builder().setPort(XXXX).build(); for setting a static port number Credential credential = new AuthorizationCodeInstalledApp( flow, localReceiver).authorize("user"); although you wont be able to change redirect url, however you can set the host as well as port. For changing host use .setHost() method You can also use default constructor as: Credential credential = new AuthorizationCodeInstalledApp( flow, new LocalServerReceiver("Host", XXXX).authorize("user");
doc_518
I installed awscli and ran aws configure and set up my aws access keys correctly. ( I know I configured it correctly, because $ aws s3 ls returns the correct lists of my s3 buckets.) However, when I try to write some objects to s3 from django application, it fails producing the error as described in the title. I recently moved to a new instance and started using python virtual environments. Before that, it used to work fine. I have read the questions on SO and the docs from aws. Below is the stack trace. Environment: Request Method: POST Request URL: http://*******/product/4 Django Version: 1.10.6 Python Version: 3.5.2 Installed Applications: ('django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'abc.apps.abcdirectConfig') Installed Middleware: ('django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware') Traceback: File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/django/core/handlers/exception.py" in inner 42. response = get_response(request) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/django/core/handlers/base.py" in _legacy_get_response 249. response = self._get_response(request) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/django/core/handlers/base.py" in _get_response 187. response = self.process_exception_by_middleware(e, request) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/django/core/handlers/base.py" in _get_response 185. response = wrapped_callback(request, *callback_args, **callback_kwargs) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/django/contrib/auth/decorators.py" in _wrapped_view 23. return view_func(request, *args, **kwargs) File "/home/ubuntu/abcdirect/abcdirect/views.py" in view_product 385. s3.Bucket('abccms').put_object(Key=s3_file_name, Body=s3_file_data) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/boto3/resources/factory.py" in do_action 520. response = action(self, *args, **kwargs) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/boto3/resources/action.py" in __call__ 83. response = getattr(parent.meta.client, operation_name)(**params) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/botocore/client.py" in _api_call 253. return self._make_api_call(operation_name, kwargs) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/botocore/client.py" in _make_api_call 530. operation_model, request_dict) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/botocore/endpoint.py" in make_request 141. return self._send_request(request_dict, operation_model) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/botocore/endpoint.py" in _send_request 166. request = self.create_request(request_dict, operation_model) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/botocore/endpoint.py" in create_request 150. operation_name=operation_model.name) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/botocore/hooks.py" in emit 227. return self._emit(event_name, kwargs) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/botocore/hooks.py" in _emit 210. 
response = handler(**kwargs) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/botocore/signers.py" in handler 90. return self.sign(operation_name, request) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/botocore/signers.py" in sign 147. auth.add_auth(request) File "/home/ubuntu/.virtualenv/lib/python3.5/site-packages/botocore/auth.py" in add_auth 679. raise NoCredentialsError Exception Type: NoCredentialsError at /product/4 Exception Value: Unable to locate credentials A: I figured out the reason I was getting the error. I am posting the answer just in case someone else encounters this issue. tl;dr : aws config files did not live in apache's home directory The django app was running under the user www-data(apache2). When I configured my credentials using aws configure, my settings were stored in .aws/config file. Now this is where the problem was. The .aws/configure file was stored in my home directory and not in the home directory of www-data(apache2),which is /var/www by default. So when the django app called boto3 module, the module was looking for the config file in /var/www/.aws/config but my files were in /home/ubuntu/.aws/config. Simply copying the relevant files to /var/www/ fixed the problem for me.
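For anyone applying the same fix, a sketch of the commands (this assumes the default www-data account with /var/www as its home directory; adjust paths and the credentials file name to your setup):

sudo mkdir -p /var/www/.aws
sudo cp /home/ubuntu/.aws/config /var/www/.aws/          # and .aws/credentials, if present
sudo chown -R www-data:www-data /var/www/.aws
sudo chmod -R go-rwx /var/www/.aws

On EC2, attaching an IAM instance role is an alternative that avoids keeping credential files on the box at all.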
doc_519
Let me demo the problem. I'm trying to get the next closest 4pm. Imagine it's 4:30 pm, the next closest 4pm should be tomorrow at 4pm. For days, it's as simple as next {day}, so naturally I tried... (new DateTime("next 4pm"))->format('Y-m-d H:i:s'); // invalid (new DateTime("next 16:00"))->format('Y-m-d H:i:s'); // invalid so, I moved onto this (new DateTime(""))->format('Y-m-d H:i:s'); (new DateTime("4pm"))->format('Y-m-d H:i:s'); (new DateTime("+8 hours"))->format('Y-m-d H:i:s'); (new DateTime("+8 hours 4pm"))->format('Y-m-d H:i:s'); which produces this output: string(19) "2020-06-10 16:47:10" // now string(19) "2020-06-10 16:00:00" // 4pm string(19) "2020-06-11 00:47:10" // +8 hours string(19) "2020-06-11 00:00:00" // +8 hours 4pm ?? Where as I was expecting the +8 hours 4pm to output this: string(19) "2020-06-11 16:00:00" So I went off to the php docs, I found this note Relative statements are always processed after non-relative statements. This makes "+1 week july 2008" and "july 2008 +1 week" equivalent. Exceptions to this rule are: "yesterday", "midnight", "today", "noon" and "tomorrow". Note that "tomorrow 11:00" and "11:00 tomorrow" are different. Considering today's date of "July 23rd, 2008" the first one produces "2008-07-24 11:00" where as the second one produces "2008-07-24 00:00". The reason for this is that those five statements directly influence the current time. So I've tried a few other things leveraging those exceptions but couldn't seem to get anything useful -- and so I tried looking around online and found plenty of people trying something similar, but nothing exactly like this. And sure, it's possible with some additional processing but I'm interested in a pure 1 strtotime / DateTime solution. I'm wondering if I'm missing something simple or if it's not possible. A: You're close. Break up your relative time into two pieces: * *Set it to eight hours from now *Set it to 4pm echo (new DateTime("+8 hours"))->modify('4pm')->format('Y-m-d H:i:s'); Output: 2020-06-11 16:00:00
doc_520
1 9 8 4 A: can you try something like... .verticaltext{ font: bold 13px Arial; width: 15px; writing-mode: tb-rl; } A: Try this: .rotate { /* Safari */ -webkit-transform: rotate(-90deg); /* Firefox */ -moz-transform: rotate(-90deg); /* Internet Explorer */ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3); }
doc_521
Here is a toy example: import xarray as xr import numpy as np # 1. Generate a sample DataArray with missing values dims = ('y', 'x', 't') shape = (1000, 1000, 10) coords = {d: np.arange(s) for d, s in zip(dims, shape)} mask = np.random.randint(0, 2, shape) data = np.where(mask, np.random.rand(*shape), np.nan) da = xr.DataArray(data, dims=dims, coords=coords) # 2. Write and reload from disk as dask array da.to_netcdf('_tmp.nc') da = xr.open_dataarray('_tmp.nc', chunks={'y': 100, 'x': 100, 't': 1}) # 3. Iteratively fill gaps for t in range(1, len(da['t'])): # The following doesn't work with dask arrays da[{'t': t}] = da[{'t': t}].fillna(da[{'t': t-1}]) This would work fine, except dask arrays don't support item assignment and hence the last line doesn't work. My dataset is too large to read into memory, so calling .load() is not an option. Is there any way to use .fillna() in this way while still making use of the lazy evaluation of chunks provided through dask? My real data is about 10000x10000x100 and contains multiple variables. A: At the moment, this sort of operation is only partially supported in Xarray. Ideally, you could use da.ffill() but there are some existing problems with the implementation that may not give you the desired result (to be explicit, xarray does not currently support filling between chunks). You may want to take a look at this GitHub issue to try out a potential workaround: https://github.com/pydata/xarray/issues/2699. I encourage you to engage on this issue if it seems like what you're going for.
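Until that is resolved, one lazy workaround (only a sketch built on the toy example's da above, and distinct from the ffill fix referenced in the issue) is to build each filled time step from the previously filled one and concatenate at the end, so no item assignment on the dask-backed array is needed:

# Iteratively fill gaps without assigning back into the dask-backed array
filled = [da.isel(t=0, drop=True)]
for t in range(1, da.sizes['t']):
    filled.append(da.isel(t=t, drop=True).fillna(filled[-1]))
result = xr.concat(filled, dim=da['t'])   # still lazy; evaluated on compute/to_netcdf

Note that the task graph grows with the number of time steps, which should be fine for around 100 steps but may get slow for much longer time axes.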
doc_522
How do I go about this? Can I tell the client to include the firebase JWT in every request to my backend, so that the backend knows the user is logged in? (This is necessary so that the backend will not redirect a logged-in user to the login page, for example.) Background Research: The firebase authentication docs explain how to get the firebase token, send it to your custom backend, and then do something on the backend with the user data. That's fine for an XHR request, where you can tell the browser to include the token as a header. I don't understand how to get the browser to include the token in a normal HTTP request to the server, like when the user opens a new tab and navigates to the admin panel at https://example.com/admin. This is a related question, but I didn't understand the answer (or at least how I could apply it to my use case). A: Here's how the good guys at jwt.io explain it: Whenever the user wants to access a protected route or resource, the user agent should send the JWT, typically in the Authorization header using the Bearer schema. The content of the header should look like the following: Authorization: Bearer <token> This is a stateless authentication mechanism as the user state is never saved in server memory. The server's protected routes will check for a valid JWT in the Authorization header, and if it's present, the user will be allowed to access protected resources.
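For the XHR/fetch case, a small sketch with the Firebase JS SDK (the backend URL is made up, and a signed-in user is assumed): fetch a fresh ID token and pass it in the Authorization header so the backend can verify it.

// v8-style Firebase JS SDK API assumed
firebase.auth().currentUser.getIdToken(/* forceRefresh */ true)
  .then(function (idToken) {
    return fetch('https://example.com/admin/data', {
      headers: { 'Authorization': 'Bearer ' + idToken }
    });
  })
  .then(function (response) {
    // handle the backend's response
  });

Note that this only helps for requests your own script makes (XHR/fetch); a plain browser navigation such as opening a new tab cannot attach a custom Authorization header.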
doc_523
ex: This will play on AirPods but not Show Media info on Lock Screen try AVAudioSession.sharedInstance().setCategory( AVAudioSession.Category.playback, options: AVAudioSession.CategoryOptions.defaultToSpeaker ) try AVAudioSession.sharedInstance().setActive(true) Also set category without options will not apply media info: ex: This will not play on AirPods but Show Media info on Lock Screen try AVAudioSession.sharedInstance().setCategory(AVAudioSession.Category.playback) try AVAudioSession.sharedInstance().setActive(true) A: Passing .defaultToSpeaker is very strange if you want it to play to AirPods. That said, it doesn't actually matter, since .defaultToSpeaker doesn't impact .playback at all. It is only intended to be used with .playAndRecord. When using it with unsupported modes, you may find it has unusual behaviors. (.defaultToSpeaker doesn't mean "play on the phone speaker." It turns on a variety of routing behaviors that you are likely not expecting.) The actual issue here is that you haven't allowed A2DP, which is what you want for AirPods. For that, the option is .allowBluetoothA2DP. A2DP is a high-quality unidirectional protocol, so you can only use it for playing audio, not when you're also trying to record audio (but that seems to be what you're doing here). AVAudioSession options are fairly complex and subtle. They are not obvious at all, and the names in some cases are even misleading. You should not try to guess which ones to use, or copy example code without understanding what it is doing. You need to read the docs for this.
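A sketch of the playback-only setup that suggestion points to, with .defaultToSpeaker removed and the A2DP option added; note that with a playback-only category the system may already route to A2DP devices on its own, so the option mainly matters when you actually need .playAndRecord:

// Playback-only session; drop .defaultToSpeaker (it only applies to .playAndRecord)
try AVAudioSession.sharedInstance().setCategory(
    AVAudioSession.Category.playback,
    mode: .default,
    options: [.allowBluetoothA2DP]
)
try AVAudioSession.sharedInstance().setActive(true)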
doc_524
I noticed that in the case when they are first shown (and required) and then hidden, UNREGISTER_FIELD is called but the syncErrors is not updated and still includes as invalid the field which has just been unregistered; this is the action { type: '@@redux-form/UNREGISTER_FIELD', meta: { form: 'registerAccommodationForm' }, payload: { name: 'property.breakfastPrice', destroyOnUnmount: false } } and this is this state (which has not changed): form: { registerAccommodationForm: { syncErrors: { property: { breakfastPrice: 'Required' } }, My form is initialized in this way: FormPropertyDetails = reduxForm({ form: 'registerAccommodationForm', destroyOnUnmount: false, })(FormPropertyDetails); Is this a normal behavior? Shouldn't the validation be updated when fields unregister? How could one trigger an update of syncErrors when dynamic fields are shown/hidden? I'm using version 6.7.0. A: I found the solution, I just needed to add forceUnregisterOnUnmount: true in form initialization: FormPropertyDetails = reduxForm({ form: 'registerAccommodationForm', forceUnregisterOnUnmount: true, destroyOnUnmount: false, })(FormPropertyDetails); In this way the state of my wizard is preserved but fields values and syncErrors are cleared when a field is unmounted. You can find a sandbox here
doc_525
When I go to use a assignment that was defined in another function that I made, the assignment does not work. My problem is in a long line of code but I made a smaller version to help explain what is happening. def test(): """ takes input """ f = input("1 or 2? ") if f == 1: t = "wow" if f == 2: t = "woah" def test2(t): """ Uses input """ print(t) def main(): test() test2(t) main() input("\n\nPress enter to exit" ) I am not sure why the program wont use the assignment "t" after it selects an input. My goal is to use the input from the 1st function to change the outcome of the second function. of course my original program is more complicated that a simple print function but this demonstration is what I know is messing up my program. My original program deals with opening .txt files and the input is choosing which file to open. Any help would be greatly appreciated. A: You have to return "t" in order to use it in test2: def test(): """ takes input """ f = input("1 or 2? ") if f == '1': t = "wow" if f == '2': t = "woah" return t # This returns the value of t to main() def test2(t): """ Uses input """ print(t) def main(): t = test() # This is where the returned value of t is stored test2(t) # This is where the returned value of t is passed into test2() main() input("\n\nPress enter to exit" )
doc_526
Intent intent = new Intent(Settings.ACTION_MANAGE_OVERLAY_PERMISSION, Uri.parse("package:" + activity.getPackageName())); activity.startActivityForResult(intent, REQUEST_OVERLAY_PERMISSION); This requires that the user press the back button after enabling permission, but I've noticed that some apps are able to automatically return or go to another activity after the user enables it. How is this accomplished? Is there a service listening in the background that is periodically checking to see if the user has enabled something? A: It appears that there is no way to observe this setting, so there is really no elegant solution for this. What you can do is just check the setting once per second using a Handler after you send the user to the Settings screen. You can't truly "go back" programmatically from the Settings screen, so the only option is to re-launch the Activity and clear the previous ones (otherwise it will go back to the Settings screen on back press afterwards). With the example below, within one second of enabling the setting, it will re-launch MainActivity. First define the Runnable: Handler handler = new Handler(); Runnable checkOverlaySetting = new Runnable() { @Override @TargetApi(23) public void run() { if (Build.VERSION.SDK_INT < Build.VERSION_CODES.M) { return; } if (Settings.canDrawOverlays(MainActivity.this)) { //You have the permission, re-launch MainActivity Intent i = new Intent(MainActivity.this, MainActivity.class); i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK | Intent.FLAG_ACTIVITY_CLEAR_TASK); startActivity(i); return; } handler.postDelayed(this, 1000); } }; Then start the Runnable when you send the user to the Settings screen: Intent intent = new Intent(Settings.ACTION_MANAGE_OVERLAY_PERMISSION, Uri.parse("package:" + getPackageName())); startActivityForResult(intent, ACTION_MANAGE_OVERLAY_PERMISSION_REQUEST_CODE); //Add this here: handler.postDelayed(checkOverlaySetting, 1000); Note that you should set a timeout or max tries so that it doesn't go on forever if the user does not enable the overlay setting. A: Another way to go back is to keep track of the request code and then finishActivity int REQ_CODE = 0; public void requestPermission(){ Intent intent = new Intent(Settings.ACTION_MANAGE_OVERLAY_PERMISSION, Uri.parse("package:" + getContext().getPackageName())); startActivityForResult(intent, REQ_CODE); } public void onPermissionChanged(){ runOnUiThread(new Runnable() { @Override public void run() { finishActivity(REQ_CODE); } }); }
doc_527
A: Installing your repository on a separate machine is probably a better idea, since at a minimum, it will allow your source code to survive a hard drive crash on your development machine. If you're new to SVN, you can't beat the free e-book from Red-Bean and O'Reilly ... Check out "Version Control with Subversion" here: http://svnbook.red-bean.com/. A: I recommend VisualSVN Server once you're ready to install... A: There's really little point in installing on a "spare" machine. It doesn't consume any significant CPU or memory. Other good reasons to install it on your main system: * *Faster repository access; not as big an issue with SVN as CVS, but checkins, checkouts, etc will be significantly faster with a local repo than one over the network. *More likely to be backed up. You are backing up your dev box, right? Right? If not, there's a really good reason to. And usually boxes that you work on regularly are more likely to get backed up than ones sitting off in a corner somewhere. *Less power consumption, presuming the "spare" box is otherwise off. *As a really minor point, you won't have to muck around with network-based access, but this really isn't difficult in the first place. The only good reason I can think to have it on a separate box is a single point of failure. If your Vista box kicks the bit bucket, then you're dead in the water. But hey, you were backing it up. Right? RIGHT? A: In my opinion you HAVE to install it on another machine, and preferably one offsite and available over the internet. Doing it on another machine provides several advantages: * *You can do whatever you like to your dev machine config-wise and not worry about hosing your svn installation *The repo acts as a backup of your code, so if you have some sort of disaster you can get your code back *If the machine is available over the internet, you can work on your code anywhere on any machine *You can easily ask people to look at your code by checking it out from the SVN. They may even contribute some code back! *For me at least, there's some sort of significance to checking in the code. I think if the repo was on another machine, you would make sure your code was worth committing first. Perhaps look at one of the free hosted services, like assembla.com. Have fun! A: I currently use a hosted SVN server, this frees me from all the installation issues. I have also the benefit of having an off-site backup, so if my office gets on fire my source code will be safe. Dreamhost hosts SVN even in the cheapest plan and you can install it with a single-click, no needing of SVN configuration knowledge is required. A: Consider grabbing the Buildix application server from Thoughtworks & run it in a VM. You'll get a SVN server as well as a bunch of other goodies and, if you're ready to commit to it, you can consider installing it on a second box. A: I'd prefer it on a different machine for flexibility (you could use a different system or get a new machine without impacting the repository) and for safety/security. By having it on a different machine, you eliminated the chance of losing everything if your machine dies. A: If it's just for your personal projects and you always use the same machine anyway, just install it on the same machine. That is simpler. A: Alternatively, you can install the repository on your development machine and use any file-replication utility to replicate it on a backup machine. I personally use Foldershare (http://foldershare.com) to replicate my repo. 
A: If you you install it on a remote machine you will also need to install a server service to get and send files between the development environment and your repository. This can be done with the svnserve daemon or by modifying A httpd server like apache. If you set it up on the local machine you don't need to know or do any of the above, just run the install and then use your favourite client to interact with it. A: If you have it on the same machine it is trivially easy to get up and running. You don't have to worry about how you're going to connect to the repository, whether you're going to secure the connection, etc. By trivially easy, I mean, svnadmin create c:\repo. If you set it up on a separate machine and work out the connectivity issues ahead of time, you will save yourself some time down the road if you get a new development machine or if someone else starts working with you. As for backups, it's true that having the repo on a separate machines means that you have some of the code in two places. However, you still need backups for your repository. Otherwise, when your repository goes down, you'll only have the versions of the files that you have checked out. History (and isn't that the point of version control) will be lost forever. A: If you are installing it yourself on windows may I recommend SlikSVN http://www.sliksvn.com/en/download/ it'sa lot easier than configuring apache. They also do hosted SVN but I don't have experience of them A: An off-site hosted SVN is probably the easiest way of getting it set up. CVSDude actually offers one free repository of 2mb for free. ProjectLocker, CVSDude, and DreamHost all offer paid plans which cost a nominal fee ($5-$20) per month, and provide you with the ability to open the SVN repository to other users, and provide trac, and a few other services. If you want to set it up at home, and you have a spare computer that you feel comfortable leaving running all the time, then certainly that's a better choice, giving you an automatic backup, as well as having rollback capabilities. 100MB ethernet is plenty fast, and even Wireless G shouldn't have any speed problems with SVN. Local SVN doesn't give you much except for rollback, which is better than nothing, but still not perfect. A: IMHO, you can install it on your machine to avoid maintenance of a second machine. You also should backup your repository on a network drive or on an external media to avoid data loss. Once you have your data in a safe place, you can very easily reinstall subversion on any available machine with any OS on it. My company's Subversion installed on windows too, and I'm using hot-backup.py which runs once an hour from scheduler.
doc_528
In the main stage's root group I can't add the file explorer, as it's not a JavaFX node: FileExplorer fe = new FileExplorer(new File("D:/")); myVbox.getChildren().add(fe); I get this error: The method add(Node) in the type List<Node> is not applicable for the arguments (FileExplorer) A: Using Swing in a JavaFX application has limitations and should be avoided. There are two ways to solve your problem: * *Create a Swing application with 2 parts: the FileExplorer and a JFXPanel for all the JavaFX content (a minimal sketch follows below). See http://docs.oracle.com/javafx/2.0/api/javafx/embed/swing/JFXPanel.html *Use the JavaFX TreeView control and populate it with filesystem info yourself.
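A minimal sketch of the first option (the MixedApp class name is made up, FileExplorer is assumed to be your existing Swing/AWT component, and Java 8 lambdas are assumed): the top-level window is a Swing JFrame hosting both the Swing component and a JFXPanel, and the JavaFX scene is built on the JavaFX thread via Platform.runLater.

import java.awt.BorderLayout;
import java.io.File;
import javax.swing.JFrame;
import javax.swing.SwingUtilities;
import javafx.application.Platform;
import javafx.embed.swing.JFXPanel;
import javafx.scene.Scene;
import javafx.scene.layout.VBox;

public class MixedApp {
    public static void main(String[] args) {
        SwingUtilities.invokeLater(() -> {
            JFrame frame = new JFrame("File explorer");
            frame.setLayout(new BorderLayout());
            // The Swing component stays on the Swing side of the window
            frame.add(new FileExplorer(new File("D:/")), BorderLayout.WEST);
            // JFXPanel bridges JavaFX content into the Swing frame
            JFXPanel fxPanel = new JFXPanel();
            frame.add(fxPanel, BorderLayout.CENTER);
            frame.setSize(800, 500);
            frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
            frame.setVisible(true);
            // Scene construction must happen on the JavaFX Application Thread
            Platform.runLater(() -> fxPanel.setScene(new Scene(new VBox())));
        });
    }
}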
doc_529
It works perfectly in Chrome and Firefox but not in Safari. The website doesn't even load. It always gives the error "A problem repeatedly occurred with "http://franciscouto.pt/woodkid/". Reload webpage." Thanks in advance
doc_530
Below is my code: <com.google.android.material.textfield.TextInputLayout android:id="@+id/cardNumberTil" style="@style/AuthTheme.NumberEditText" android:layout_width="match_parent" android:layout_height="wrap_content" android:layout_marginTop="32dp" app:hintTextAppearance="@style/CaptionLink"> <com.google.android.material.textfield.TextInputEditText android:layout_width="match_parent" android:layout_height="wrap_content" android:hint="Some hint here" android:imeOptions="actionDone" android:inputType="number" android:maxLines="1" android:paddingStart="45dp" android:text="12345678" android:textAppearance="@style/Body1Style" tools:ignore="HardcodedText" /> </com.google.android.material.textfield.TextInputLayout> Here are styles: <style name="Body1Style"> <item name="android:fontFamily">@font/sans_serif_regular</item> <item name="android:textSize">16sp</item> <item name="android:lineSpacingExtra">8sp</item> <item name="android:textColor">#000000</item> </style> <style name="CaptionLink"> <item name="android:fontFamily">@font/roboto_regular</item> <item name="android:textSize">12sp</item> <item name="android:textStyle">normal</item> <item name="android:lineSpacingExtra">8sp</item> <item name="android:textColor">#0089d0</item> </style> <style name="AuthTheme.NumberEditText" parent="Widget.MaterialComponents.TextInputLayout.OutlinedBox"> <!-- Hint color and label color in FALSE state --> <item name="android:textColorHint">@color/textColorSubtitle1Gray</item> <!-- Label color in TRUE state and bar color FALSE and TRUE State --> <item name="colorAccent">@color/cerulean</item> <item name="colorControlNormal">@color/cerulean</item> <item name="colorControlActivated">@color/cerulean</item> <item name="android:fontFamily">@font/roboto_regular</item> <item name="android:textSize">12sp</item> <item name="android:textStyle">normal</item> <item name="android:lineSpacingExtra">8sp</item> </style> desired output picture
doc_531
CSS ul { width: 160px; display: block; white-space: nowrap; overflow: auto; vertical-align: top; } li { width: 80px; display: inline-block; } HTML <div> <ul> <li>one</li><li>two</li><li>three</li><li>four<br />Test</li> </ul> </div> I need to make one, two, and three align at the top like four. Please help. Example here: http://jsfiddle.net/Y7PhV/ Thank you A: Vertical-align your LIs instead of the UL: li {vertical-align:top;} http://jsfiddle.net/Y7PhV/88/ A: This CSS should work... ul { width: 80px; display: block; /* white-space: nowrap; */ /* overflow: auto; */ } li { background: papayaWhip; height: 40px; width: 80px; display: inline-block; }
doc_532
function f(a = 0, b = 0, c = 0) { // Do something } f(b=3, c=4); Can I do something like this in PHP? Thank you very much indeed! A: Indeed. function foo($bar = "test") { echo "I want ".$bar; } A: No, unfortunately you can't "step over" arguments in PHP, so default values are only possible at the end of the list. So you can write this: function foo($a=0, $b=0) {} foo(42); Here $b will have its default value but $a won't, as we provided one value as input. However there's no way of providing a value for $b without also providing one for $a - we have to provide the value for $b as the second parameter, and PHP has no keyword we can use in place of the first parameter to say "use default". There is talk of adding named parameters to a future version of PHP, similar to your non-PHP example. You can simulate this a bit with some changes to your code, though; a couple of ideas: * *treat null as meaning "use default", and write $a = is_null($a) ? 42 : $a *make your functions take an associative array as their only parameter, and take values from it as though their keys were parameter names A: In PHP this is not possible: see the PHP Manual. Function parameters are always evaluated from left to right. However, f($b=3, $c=4); is possible, but does something different as you will expect: before the function f() is called, the arguments are evaluated (variable $b and $c get assigned with the values 3 and 4 resp.) and then the function is called as f(3,4) As side effect, the variables $b and $c are set to the new values.
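To make those two ideas concrete, here is a small, hypothetical sketch (the function and option names are made up): the first treats null as "use the default", the second takes a single associative array so callers only name the values they care about.

// Idea 1: null means "use the default"
function make_box($width = null, $height = null) {
    $width  = is_null($width)  ? 100 : $width;
    $height = is_null($height) ? 50  : $height;
    return $width . "x" . $height;
}
echo make_box(null, 80);               // "100x80" – width keeps its default

// Idea 2: one associative array of named options
function make_box2(array $options = array()) {
    $defaults = array('width' => 100, 'height' => 50);
    $options  = array_merge($defaults, $options);
    return $options['width'] . "x" . $options['height'];
}
echo make_box2(array('height' => 80)); // "100x80"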
doc_533
My request is made of 10 different select . The actual production query is taking 36sec to execute. If I display the execution plan, for one select I have a query cost of 18%. So I change a in clause (in this select) with an xml query (http://www.codeproject.com/KB/database/InClauseAndSQLServer.aspx). The new query now takes 28 sec to execute, but sql server tells me that the above select has a query cost of 100%. And this is the only change I made. And there is no parallelism in any query. PRODUCTION : 36sec, my select is 18% (the others are 10%). NEW VERSION : 28sec, my select is 100% (the others are 0%). Do you have any idea how sql server compute this "query cost" ? (I start to believe that it's random or something like that). A: Query cost is a unitless measure of a combination of CPU cycles, memory, and disk IO. Very often you will see operators or plans with a higher cost but faster execution time. Primarily this is due to the difference in speed of the above three components. CPU and Memory are fairly quick, and also uncommon as bottlenecks. If you can shift some pressure from the disk IO subsystem to the CPU, the query may show a higher cost but should execute substantially faster. If you want to get more detailed information about the execution of your specific queries, you can use: SET STATISTICS IO ON SET STATISTICS TIME ON This will output detailed information about CPU cycles, plan creation, and page reads (both from disk and from memory) to the messages tab.
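For illustration, a minimal way to use those switches around the suspect select (the table and column names here are invented, not taken from the question):

SET STATISTICS IO ON;
SET STATISTICS TIME ON;

-- run the one select you changed and read the Messages tab
SELECT t.TradeId, t.TradeDate
FROM dbo.Trades AS t
WHERE t.TradeDate >= '2012-01-01';

SET STATISTICS IO OFF;
SET STATISTICS TIME OFF;

Comparing logical reads and CPU/elapsed time between the old IN-clause version and the XML version is usually more telling than the relative cost percentages shown in the plan.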
doc_534
Here is my table <table id="table1" > <caption>Affectation d'Opacif</caption> <!--Header du tableau (1ere ligne avec intitulés des champs : )--> <thead> <tr> <th>Date</th> <th>N° Client</th> <th>Nom</th> <th>Naf</th> <th>Siret</th> <th>Raison Sociale</th> <th>Opacif</th> </tr> </thead> <!--Body du tableau --> <tbody> @for (int i = 0; i < Model.beneficiaries.Count; i++) { <tr> <td>@Model.beneficiaries[i].date</td> <td class="taille100">@Model.beneficiaries[i].id</td> <td>@Model.beneficiaries[i].lastname <br /> @Model.beneficiaries[i].firstname </td> <td class="taille80">@Model.beneficiaries[i].naf</td> <td>@Model.beneficiaries[i].siret</td> <td class="taille300">@Model.beneficiaries[i].raisonsociale</td> <td class="taille200">@Html.DropDownListFor(m => m.beneficiaries[i].opacif, new SelectList(Model.opacifs), "Selectionner un Opacif", new { @onchange = "CallChangefunc(this)", @naf= @Model.beneficiaries[i].naf, @siret= @Model.beneficiaries[i].siret })</td> </tr> } </tbody> </table> Here is my controller : ` public ActionResult Index() { var service = new OpacifService(); var beneficiaries = service.searchBeneficiaries(); var opacifs = service.searchOpacifs(); var viewModel = new OpacifViewModels { opacifs = opacifs, beneficiaries = beneficiaries }; return View(viewModel); } [HttpGet] public JsonResult UpdateOpacif(string naf, string opacif, string siret) { JsonResult result = null; var service = new OpacifService(); var valid = service.updateBeneficiary(naf, opacif, siret); if (valid) { result = Json(new { code = 200, message = "Mise à jour effectuée !" }, JsonRequestBehavior.AllowGet); } else { result = Json(new { code = 417, message = "Une erreur est survenue lors de la mise à jour... Réessayez ou contactez l'administrateur" }, JsonRequestBehavior.AllowGet); } return result; } }` A: Might I suggest also foreach (var bc in Model.beneficiaries) { <tr> <td>@bc.date</td> <td class="taille100">@bc.id</td> <td>@bc.lastname <br /> @bc.firstname </td> <td class="taille80">@bc.naf</td> <td>@Model.beneficiaries[i].siret</td> <td class="taille300">@bc.raisonsociale</td> <td class="taille200">@Html.DropDownListFor(m => m.beneficiaries[i].opacif, new SelectList(Model.opacifs), "Selectionner un Opacif", new { @onchange = "CallChangefunc(this)", @naf= @Model.beneficiaries[i].naf, @siret= @bc.siret })</td> </tr> } A: You can try something like this: 1) Change the Table Header and add a link to open the same Action with some route values ... // @Html.ActionLink(Linktext, Actionname, Controllername, Routevalues, Html Attributes) <th> @Html.ActionLink("Age", "clients", "home", new { sortBy= "age" }, null) </th> <th> @Html.ActionLink("Name", "clients", "home", new { sortBy= "name" }, null) </th> ... Please read this, to get a better understanding, of what the @Html.ActionLink Method actually does. The example above will produces this Markup: <a href="/home/clients/age">Age</a> 2) Now you need to add the logic to your Action in the Controller public ActionResult ActionName(string sortBy) { // Do whatever is needed to collect the values. Let's call it valueCollection Model.beneficiaries = valueCollection; switch(sortBy) { case "age": Model.beneficiaries.orderBy(client => client.age); break; case "name": Model.beneficiaries.orderBy(client => client.name); break; } // You can add more sort options of course } There is some guessing in my answer, since you only provided the markup, but your Controller actually should look like the one above. I hope you can adapt something. If you have any questions, let me know.
doc_535
set root=C:\Users\***\Anaconda3 call %root%\Scripts\activate.bat call conda activate dsatprediction call streamlit run "C:\Users\***\PycharmProjects\dsatpred\app.py" When the file is run, it results in The same app runs using the command streamlit run app.py on terminal. Any guidance is appreciated A: I don't know what causes the error you see, but my batch file is done differently, maybe it can help you. I have gotten this code to run my files: call C:\[anaconda_path_goes_here]\envs\[env_name_goes_here]\Scripts\activate.bat C:\[anaconda_path_goes_here]\envs\[env_name_goes_here]\Scripts\streamlit.exe run [path_to_streamlit_file]\[name_of_streamlit_file].py I install conda in my env and run the activate command directly in that env. Maybe it makes a difference. On the other hand, my streamlit apps don't open an files from the disk, so it may not solve your problem. A: I managed to make mine work this way : @ECHO OFF set root=C:\Anaconda3 call %root%\Scripts\activate.bat cd [PATH_TO_YOUR_STREAMLIT_APP_FOLDER] pip install -r requirements.txt call streamlit run app.py Be sure to have Anaconda installed. Give that path to the root variable. Activate that conda environment. The path to your streamlit app is only to the folder, finished by a “” and does not include your .py script. This does not need to be between " " or between . Just copy-paste the path. I added the pip install -r requirements to force the update of the python libraries needed to run my app. This is not mandatory, but just makes your life easier. Finally, I simply call my app. Your file can be located anywhere and the app runs fine. A: @ECHO OFF call "C:\[full path the app folder]\venv\scripts\activate.bat" cd C:\[full path to app folder] streamlit run app.py
doc_536
def findContainerByUID(uid: String): UserToolbar = { var userToolbar = MorphiaHelper .getDataStore().find(UserToolbar.getClass(), "uid", uid).get() userToolbar.asInstanceOf[UserToolbar] } However, when I try to compile, I get the following error: How can getCommonSuperclass() do its job if different class symbols get the same bytecode-level internal name: xxxxxx What is the problem? A: Replace UserToolbar.getClass() with classOf[UserToolbar]. UserToolbar.getClass() returns the class of the UserToolbar companion object, not the class UserToolbar itself.
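For completeness, a sketch of the corrected method (assuming MorphiaHelper and UserToolbar are as in the question); with classOf[UserToolbar] the query is typed by that class, so the asInstanceOf cast should no longer be needed:

def findContainerByUID(uid: String): UserToolbar = {
  // classOf[UserToolbar] is the class itself, not the companion object's class
  MorphiaHelper.getDataStore()
    .find(classOf[UserToolbar], "uid", uid)
    .get()
}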
doc_537
For example I have: tmp = cbind("GAD", "AB") tmp # [,1] [,2] # [1,] "GAD" "AB" My goal is to concatenate the two values in "tmp" to one string: tmp_new = "GAD,AB" Which function can do this for me? A: help.search() is a handy function, e.g. > help.search("concatenate") will lead you to paste(). A: paste() is the way to go. As the previous posters pointed out, paste can do two things: concatenate values into one "string", e.g. > paste("Hello", "world", sep=" ") [1] "Hello world" where the argument sep specifies the character(s) to be used between the arguments to concatenate, or collapse character vectors > x <- c("Hello", "World") > x [1] "Hello" "World" > paste(x, collapse="--") [1] "Hello--World" where the argument collapse specifies the character(s) to be used between the elements of the vector to be collapsed. You can even combine both: > paste(x, "and some more", sep="|-|", collapse="--") [1] "Hello|-|and some more--World|-|and some more" A: For the first non-paste() answer, we can look at stringr::str_c() (and then toString() below). It hasn't been around as long as this question, so I think it's useful to mention that it also exists. Very simple to use, as you can see. tmp <- cbind("GAD", "AB") library(stringr) str_c(tmp, collapse = ",") # [1] "GAD,AB" From its documentation file description, it fits this problem nicely. To understand how str_c works, you need to imagine that you are building up a matrix of strings. Each input argument forms a column, and is expanded to the length of the longest argument, using the usual recyling rules. The sep string is inserted between each column. If collapse is NULL each row is collapsed into a single string. If non-NULL that string is inserted at the end of each row, and the entire matrix collapsed to a single string. Added 4/13/2016: It's not exactly the same as your desired output (extra space), but no one has mentioned it either. toString() is basically a version of paste() with collapse = ", " hard-coded, so you can do toString(tmp) # [1] "GAD, AB" A: As others have pointed out, paste() is the way to go. But it can get annoying to have to type paste(str1, str2, str3, sep='') everytime you want the non-default separator. You can very easily create wrapper functions that make life much simpler. For instance, if you find yourself concatenating strings with no separator really often, you can do: p <- function(..., sep='') { paste(..., sep=sep, collapse=sep) } or if you often want to join strings from a vector (like implode() from PHP): implode <- function(..., sep='') { paste(..., collapse=sep) } Allows you do do this: p('a', 'b', 'c') #[1] "abc" vec <- c('a', 'b', 'c') implode(vec) #[1] "abc" implode(vec, sep=', ') #[1] "a, b, c" Also, there is the built-in paste0, which does the same thing as my implode, but without allowing custom separators. It's slightly more efficient than paste(). 
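For instance, a quick sketch of paste0:

paste0("GAD", "AB")                      # "GADAB" – no separator between arguments
paste0("x", 1:3)                         # "x1" "x2" "x3" – vectorised like paste()
paste0(c("GAD", "AB"), collapse = ",")   # "GAD,AB" – collapse still works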
A: > tmp = paste("GAD", "AB", sep = ",") > tmp [1] "GAD,AB" I found this from Google by searching for R concatenate strings: http://stat.ethz.ch/R-manual/R-patched/library/base/html/paste.html A: Alternatively, if your objective is to output directly to a file or stdout, you can use cat: cat(s1, s2, sep=", ") A: Consider the case where the strings are columns and the result should be a new column: df <- data.frame(a = letters[1:5], b = LETTERS[1:5], c = 1:5) df$new_col <- do.call(paste, c(df[c("a", "b")], sep = ", ")) df # a b c new_col #1 a A 1 a, A #2 b B 2 b, B #3 c C 3 c, C #4 d D 4 d, D #5 e E 5 e, E Optionally, skip the [c("a", "b")] subsetting if all columns needs to be pasted. # you can also try str_c from stringr package as mentioned by other users too! do.call(str_c, c(df[c("a", "b")], sep = ", ")) A: glue is a new function, data class, and package that has been developed as part of the tidyverse, with a lot of extended functionality. It combines features from paste, sprintf, and the previous other answers. tmp <- tibble::tibble(firststring = "GAD", secondstring = "AB") (tmp_new <- glue::glue_data(tmp, "{firststring},{secondstring}")) #> GAD,AB Created on 2019-03-06 by the reprex package (v0.2.1) Yes, it's overkill for the simple example in this question, but powerful for many situations. (see https://glue.tidyverse.org/) Quick example compared to paste with with below. The glue code was a bit easier to type and looks a bit easier to read. tmp <- tibble::tibble(firststring = c("GAD", "GAD2", "GAD3"), secondstring = c("AB1", "AB2", "AB3")) (tmp_new <- glue::glue_data(tmp, "{firststring} and {secondstring} went to the park for a walk. {firststring} forgot his keys.")) #> GAD and AB1 went to the park for a walk. GAD forgot his keys. #> GAD2 and AB2 went to the park for a walk. GAD2 forgot his keys. #> GAD3 and AB3 went to the park for a walk. GAD3 forgot his keys. (with(tmp, paste(firststring, "and", secondstring, "went to the park for a walk.", firststring, "forgot his keys."))) #> [1] "GAD and AB1 went to the park for a walk. GAD forgot his keys." #> [2] "GAD2 and AB2 went to the park for a walk. GAD2 forgot his keys." #> [3] "GAD3 and AB3 went to the park for a walk. GAD3 forgot his keys." Created on 2019-03-06 by the reprex package (v0.2.1) A: Another way: sprintf("%s you can add other static strings here %s",string1,string2) It sometimes useful than paste() function. %s denotes the place where the subjective strings will be included. Note that this will come in handy as you try to build a path: sprintf("/%s", paste("this", "is", "a", "path", sep="/")) output /this/is/a/path A: You can create you own operator : '%&%' <- function(x, y)paste0(x,y) "new" %&% "operator" [1] newoperator` You can also redefine 'and' (&) operator : '&' <- function(x, y)paste0(x,y) "dirty" & "trick" "dirtytrick" messing with baseline syntax is ugly, but so is using paste()/paste0() if you work only with your own code you can (almost always) replace logical & and operator with * and do multiplication of logical values instead of using logical 'and &' A: Another non-paste answer: x <- capture.output(cat(data, sep = ",")) x [1] "GAD,AB" Where data <- c("GAD", "AB") A: Given the matrix, tmp, that you created: paste(tmp[1,], collapse = ",") I assume there is some reason why you're creating a matrix using cbind, as opposed to simply: tmp <- "GAD,AB"
doc_538
How to do that? A: * *Get the mp3plugin.jar of the JMF. *Add it to the run-time class-path of the app. to provide a decoder SPI for MP3. *Get an AudioInputStream for the MP3 from the AudioSystem of Java Sound. *Convert it to PCM using getAudioInputStream(AudioFormat,AudioInputStream).
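A rough sketch of steps 3 and 4, assuming mp3plugin.jar is already on the class path (the class and file names are just examples):

import java.io.File;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class Mp3ToPcm {
    public static AudioInputStream openAsPcm(File mp3) throws Exception {
        AudioInputStream mp3Stream = AudioSystem.getAudioInputStream(mp3);
        AudioFormat src = mp3Stream.getFormat();
        // target: signed 16-bit PCM with the same sample rate and channel count
        AudioFormat pcm = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                src.getSampleRate(), 16, src.getChannels(),
                src.getChannels() * 2, src.getSampleRate(), false);
        return AudioSystem.getAudioInputStream(pcm, mp3Stream);
    }
}

The returned PCM stream can then be fed to a Clip or SourceDataLine like any other sampled audio.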
doc_539
I noticed that any line that contains dynamic allocation(new and delete), does not get full coverage even if the code runs through this lines. I've made a simple example to demonstrate the problem: lets say i have the following class: class DummyClass // fully covered { public: int x; DummyClass() { x = 5; }; // fully covered ~DummyClass() { }; // fully covered }; Now i have the following function that allocate this class statically and dynamically: static void CoverageFunctionTest() { // any logic here DummyClass staticVar1; // fully covered DummyClass* dynamicVar = new DummyClass(); // partially covered // any logic here DummyClass staticVar2; // fully covered delete dynamicVar; // partially covered } The line with the "new" keyword and the line with the "delete" keyword are considered as partially covered and not fully covered. The coverage for this function is: 88.24% even though we covered every single line in it, and the coverage for the class is 100%. How can i get 100% code coverage for this function? Ty!
doc_540
The javac command runs fine, but the java command opens another command window, shows the java command options and then immediately disappears. The title of that new window shows it's executing some java.hwd file present in the bin folder which contains all the Java tools for application development. Please help. I am using JDK 1.6. A: Odds are someone put a script with the name of java, java.cmd, java.bat, etc. in the path just prior to the actual java executable. Typically such scripts contain a call to the true java command, but sometimes, due to not specifying a fixed path, they might actually call themselves. This script was likely meant as a wrapper around the actual java command, perhaps to turn on some sort of Java debugging or extra functionality. The only problem is that either the script wasn't removed after testing, the debugging interferes with your normal operation, or the person who did this didn't take care to make his changes only affect him.
doc_541
var cfWin = new Ext.Window({ id:'idNYC', el:'NYC', ......... contentEl:'NYCData', title:'NYC Data', ........ }); While I do understand what 'id' does, why would we need el and contentEl? When we have an index.html that loads the above script, do we need a div with the same name (id/el) in the HTML? A: From the official ExtJS documentation: Specify an existing HTML element, or the id of an existing HTML element to use as the content for this component. This means that if you have a div somewhere in your index.html, let's say: <div id="datalist"> <ul> <li>Data 1</li> <li>Data 2</li> </ul> </div> And you specify datalist in contentEl, that HTML content will appear in your Window. You can also specify the html directly: contentEl: "<h1>This is my content</h1>". You cannot specify the el as it is a read-only property.
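A small hypothetical sketch of the usual pattern (the div id and window config here are invented): keep a hidden div in index.html and point contentEl at its id; you normally do not set el yourself.

<!-- in index.html -->
<div id="NYCData" class="x-hidden">New York data goes here</div>

// in your script
var cfWin = new Ext.Window({
    title: 'NYC Data',
    width: 300,
    height: 200,
    contentEl: 'NYCData'   // the window adopts this existing div as its body content
});
cfWin.show();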
doc_542
if (obj.getMax() < obj.getMin()) { int temp = obj.getMax(); obj.setMax(obj.getMin()); obj.setMin(temp); } obj.setMax(obj.getMax() + 1); int currentMax = getMax(); when I can just do: obj.enforceMinMaxOrder(); int currentMax = obj.incrementAndGetMax(); Here's an abbreviated example. How can I use Byte Buddy to make the above two work? Presumably using an interceptor class, but I have not come across any examples that modify fields. Important things: * *the fields are not known at compile time, they will be extracted from the method names (i.e. "enforceMinMaxOrder" method name uses "min" and "max" as the field names) *I'd like to avoid runtime reflection used after Byte Buddy generates the classes, but holding references to fields or methods and invoking those is fine. DynamicType.Builder<? extends Object> builder = new ByteBuddy().subclass(Object.class); builder = builder.implement(MyInterface.class); builder = builder.method( (isGetter().or(isSetter()))) .intercept(FieldAccessor.ofBeanProperty()); for (Method m : MyInterface.class.getMethods()) { String methodName = m.getName(); Class<?> t = m.getReturnType(); if (t != void.class && methodName.startsWith("get")) { String fieldName = Character.toLowerCase(methodName.charAt(3)) + methodName.substring(4); builder = builder.defineField(fieldName, t, Visibility.PUBLIC); builder = builder.method(named(fieldName)).intercept(FieldAccessor.ofField(fieldName)); } } MyInterface obj = (MyInterface) builder.make().load(BeanUtil.class.getClassLoader()).getLoaded().getConstructor().newInstance(); obj.setMin(10); obj.setMax(5); obj.incrementAndGetMin(); obj.ensureMinMaxOrder(); // now, the values should be min=5, max=11 A: I think your best call would be using Advice which let's you write code templates that are later inlined into a generated class: class MinMaxAdvice { @Advice.OnMethodExit static void minMax(@Min int min, @Max int max) { ... } } The method implements your code and can override any field values. Since your resolution is method-name sensitive, you would need to add your own bindings for custom annotations @Min and @Max, have a look at the Advice.withCustomBinding() API to learn how this is done. You can reuse the field bindings that exist already, but you have to locate those fields in the manner you want to achieve this.
doc_543
import Utils class HelpController { def search = { Utils.someFunction() } } Here is my spock groovy spec: import Utils import grails.test.mixin.* import HelpController @TestMixin(GrailsUnitTestMixin) @TestFor(HelpController) class HelpControllerSpec extends Specification { void "should call someFunction method in Utils class"() { when: helpController.search() then: 1 * Utils.someFunction() } } Running the test results in the error: too few invocations for Utils.someFunction() (0 invocations) Utils is a java class. When I step through the spock unit test, it appears that Utils.someFunction() is called so I am a bit confused as to what may be going on. Can anyone advise? Thanks in advance! A: You have to implement that method, cause you are running a unit test, and the grails application is not running. Use the annotation @ConfineMetaClassChanges for cleaning the metaclass of the class Utils import Utils import grails.test.mixin.* import HelpController import spock.util.mop.ConfineMetaClassChanges @TestMixin(GrailsUnitTestMixin) @TestFor(HelpController) @ConfineMetaClassChanges([Utils]) class HelpControllerSpec extends Specification { setup(){ Utils.metaClass.someFunction = { //expected response } } void "should call someFunction method in Utils class"() { when: helpController.search() then: 1 * Utils.someFunction() } }
doc_544
THanks A: Step1: Add named range myList with formula: =INDEX(FINAL[#Data],2,0) Step2: Select any cell and add data validation with Source: =myList Result: A: I know you have found a solution, but I would recommend using the Indirect function instead of creating an unnecessary named range. You can't reference a table directly by name within the data validation formulas, but you can within the Indirect function, such as: =INDEX(INDIRECT("FINAL[#Data]"),2,0) This would provide you with the same list but without the need for a named range.
doc_545
Code: x = tf.placeholder(tf.float32, shape=[None, 1]) # Why must this be a float? y = tf.placeholder(tf.int32, shape=[None, 2]) with tf.name_scope("network"): layer1 = tf.layers.dense(x, 100, activation=tf.nn.relu, name="hidden_layer") output = tf.layers.dense(layer1, 2, name="output_layer") with tf.name_scope("loss"): xentropy = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output) loss = tf.reduce_mean(xentropy, name="loss") with tf.name_scope("train"): optimizer = tf.train.AdamOptimizer() training_op = optimizer.minimize(loss) with tf.name_scope("eval"): with tf.Session() as sess: for i in range(1, 50): sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() sess.run(training_op, feed_dict={x: np.array(train_data).reshape([-1, 1]), y: label}) if i % 10 == 0: saver.save(sess, "saved_models/testing") print "Saved" When I change it to tf.int32, it gives the following error: TypeError: Value passed to parameter 'features' has DataType int32 not in list of allowed values: float16, float32, float64 I can provide more code if needed. A: This is due to tf.nn.softmax_cross_entropy_with_logits: logits and labels must have the same shape [batch_size, num_classes] and the same dtype (either float16, float32, or float64). I suppose you could compute a loss with integer inputs. However, most of the time, this loss is minimized by gradient descent -- as you do -- which means inputs needs to encode real numbers to get arbitrary updates. The thing is that tf.layers.dense won't change the type of your input. So it will produce an integer output it its input is an integer. (At least if the activation is compatible with integers, such as relu -- a sigmoid would raise an error). What you probably wanted to do is provide integer inputs then do all computations in say tf.float32. To do this, cast your input first before providing it to dense: layer1 = tf.layers.dense(tf.to_float(x), 100, activation=tf.nn.relu, name="hidden_layer")
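A minimal sketch of that suggestion applied to the code above, assuming TF 1.x (tf.to_float(x) and tf.cast(x, tf.float32) are interchangeable here): keep x and y as tf.int32 and cast them before they reach the float-only ops.

import tensorflow as tf

x = tf.placeholder(tf.int32, shape=[None, 1])   # integer input is fine now
y = tf.placeholder(tf.int32, shape=[None, 2])

layer1 = tf.layers.dense(tf.to_float(x), 100, activation=tf.nn.relu, name="hidden_layer")
output = tf.layers.dense(layer1, 2, name="output_layer")

# labels must match the dtype of the logits, so cast them as well
xentropy = tf.nn.softmax_cross_entropy_with_logits(labels=tf.to_float(y), logits=output)
loss = tf.reduce_mean(xentropy, name="loss")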
doc_546
https://learn.microsoft.com/en-us/xamarin/android/platform/maps-and-location/location IsGooglePlayServicesInstalled function returns true, and Xamarin.GooglePlayServices.Maps is installed. What am I doing wrong? A: The solution is: Install Xamarin.GooglePlayServices.Location nuget package. In the docs here, they didn't mention this package, I even didn't know that it exists.
doc_547
class Snake: hp=100 attack=25 defense=1 def set_name(self, name): self.name=name def shed(self): self.defense=self.defense+1 def attack(self, opposite, current): opposite.hp=opposite.hp-(current.attack-opposite.defense) def eat(self): self.attack=self.attack+5 print(str(self.name) + " eats a rat!") print(str(self.name) + "'s attack dmg is now " + str(self.attack)) def sleep(self): print (str(self.name) + " goes to sleep") self.hp=self.hp+10 if self.hp>100: self.hp=100 print (str(self.name) + " wakes up with " + str(self.hp) + "hp") ##initialises the snakes alpha=Snake() beta=Snake() ## gives the snakes names of the user's choice alpha_name=raw_input("What would you like to name your snake? ") alpha.set_name(alpha_name) beta_name=raw_input("What would you like to name the other snake? ") beta.set_name(beta_name) ##starts the game turn=True while alpha.hp>0 and beta.hp>0: while turn==True: opposite="beta" current="alpha" action=raw_input("attack, sleep, eat or shed? ") try: if action=="attack": alpha.attack(opposite, current) if action=="sleep": alpha.sleep() if action=="eat": alpha.eat() if action=="shed": alpha.shed() turn=False except IOError: print("Please chose only one action, exaclty how it is typed") while turn==False: opposite="alpha" current="beta" if beta.hp<15: beta.sleep() elif alpha.hp>75: beta.attack() else: index=random.randint(1, 3) if index==1: beta.shed() elif index==2: beta.eat() else: beta.attack(opposite, current) turn=True A: in "attack" you try to access "opposite.hp", but this method is called with a string instead of an object: opposite="alpha" current="beta" => change this to opposite=alpha current=beta also, there is a field and a method with the same name in the class: attack. I suggest renaming the field to "attackpoints" or something. additionaly, you call "beta.attack()". you forgot the method arguments there. A: I see two problems. The first is you're passing the name of the variable instead of the variable itself. change this: while alpha.hp>0 and beta.hp>0: while turn==True: opposite="beta" current="alpha" action=raw_input("attack, sleep, eat or shed? ") try: if action=="attack": alpha.attack(opposite, current) to this: while alpha.hp>0 and beta.hp>0: while turn==True: opposite=beta current=alpha action=raw_input("attack, sleep, eat or shed? ") try: if action=="attack": alpha.attack(opposite, current) Additionally, you have the attack field defined twice in the Snake class. class Snake: attack=25 def attack(self, opposite, current): Here's what I came up with after playing with your code: import random class Snake: hp=100 attack_skill=25 defense=1 def set_name(self, name): self.name=name def shed(self): self.defense=self.defense+1 def attack(self, opposite): opposite.hp = opposite.hp - (self.attack_skill - opposite.defense) def eat(self): self.attack_skill += 5 print(str(self.name) + " eats a rat!") print(str(self.name) + "'s attack dmg is now " + str(self.attack_skill)) def sleep(self): print (str(self.name) + " goes to sleep") self.hp=self.hp+10 if self.hp>100: self.hp=100 print (str(self.name) + " wakes up with " + str(self.hp) + "hp") ##initialises the snakes alpha=Snake() beta=Snake() ## gives the snakes names of the user's choice alpha_name=raw_input("What would you like to name your snake? ") alpha.set_name(alpha_name) beta_name=raw_input("What would you like to name the other snake? 
") beta.set_name(beta_name) ##starts the game turn=True while alpha.hp>0 and beta.hp>0: while turn==True: opposite="beta" current="alpha" action=raw_input("attack, sleep, eat or shed? ") try: if action=="attack": alpha.attack(beta) if action=="sleep": alpha.sleep() if action=="eat": alpha.eat() if action=="shed": alpha.shed() turn=False except IOError: print("Please chose only one action, exaclty how it is typed") while turn==False: opposite="alpha" current="beta" if beta.hp<15: beta.sleep() elif alpha.hp>75: beta.attack(alpha) else: index=random.randint(1, 3) if index==1: beta.shed() elif index==2: beta.eat() else: beta.attack(alpha) turn=True A: When you beta attack, you are calling the attack() method without any parameters. I assume you want beta.attack(alpha,beta) But you could probably refactor the method to only require the opponent as a parameter (since you know who is attacking (it's the object calling the attack method)) def attack(self, opposite): opposite.hp -= self.attack-opposite.defense
doc_548
So I have classes within classes like so: public class Flow { private Action actionField private string nameField private bool enabledField ... } public class Action { private ActionSchedule actionScheduleField private ActionParameter actionParameterField private nameField } public class ActionSchedule ... And a single create view for a 'Flow' @model ProjectZeus.Models.Flow @using (Html.BeginForm()) { @Html.AntiForgeryToken() @Html.ValidationSummary(true) @Html.TextBoxFor(model => model.name, new { @placeholder = "Flow name" }) @Html.ValidationMessageFor(model => model.name) @Html.LabelFor(model => model.enabled) @Html.EditorFor(model => model.enabled) @Html.ValidationMessageFor(model => model.enabled) @Html.Partial("FlowAction") ... and then partial views for each of the subclasses @model ProjectZeus.Models.FlowAction @Html.TextBoxFor(model => model.name, new { @placeholder = "Action name" }) ... I've tried creating instances of the classes and calling the view - error, I've tried creating instances of the classes in the views themselves - error, I've tried not using PartialViews: @Html.TextBoxFor(model => model.action.name, new { @placeholder = "Action name" }) I've googled and googled and googleedddd but with no luck, help please!? Edit: Implementing a customer model binder seems like overkill. This page describes the same problem but the solution code won't compile for me ‘The name ‘helper’ does not exist in the current context’? - http://danielhalldev.wordpress.com/2013/08/23/partial-views-and-nested-mvc-model-binding/ Edit2: I changed the model defintions for brevity - the model is actually auto generated from an xsd: /// <remarks/> [System.CodeDom.Compiler.GeneratedCodeAttribute("xsd", "4.0.30319.33440")] [System.SerializableAttribute()] [System.Diagnostics.DebuggerStepThroughAttribute()] [System.ComponentModel.DesignerCategoryAttribute("code")] [System.Xml.Serialization.XmlTypeAttribute(AnonymousType = true)] public partial class D53ESBFlow { private D53ESBFlowAction actionField; [Required] private string nameField; ... private bool enabledField; /// <remarks/> public D53ESBFlowAction action { get { return this.actionField; } set { this.actionField = value; } } /// <remarks/> [System.Xml.Serialization.XmlAttributeAttribute()] public string name { get { return this.nameField; } set { this.nameField = value; Edit 3 (bump): It looks like the 'binder'is creating a property and not a class object? A: Did you forget the { get; set; } accessors on the property names? A: I had a similar issue with MVC 5, .NET 4.5, Visual Studio 2013. Here's what worked for me: Add a constructor so the contained class gets instantiated, make them properties (not variables) like AntoineLev said, and add the class to the Binding: public class Flow { public Action actionField {get; set; } public class Flow() { actionField = new Action(); // otherwise it shows up as null } } In your controller, Add the the whole class in the binding: public ActionResult Create([Bind(Include="action,name,enabled")] Flow flow) { ... } Your Mileage may vary. } A: I ended up going through the request response and mapping all the properties individually by name: flow.action = new D53ESBFlowAction { name = Request["action.name"], ... A: I had similar troubles and this article of Jimmy's Bogard helped me. Article here Look at the html generated will show that with a partial view the html doesn't include the name of the nested class so by default is unable to bind it. 
Giving the binding statement as in one of the answers above solves the problem
doc_549
ROOT / \ A[0] B[1] / B[0] as opposed to Graph_2: ROOT / \ A[0] \ / \ B[0] B[1] Graph_2 is what I would like to end up with. The fixed levels are what I'm looking for. How can I achieve this? I can easily identify what data type I'm adding to the graph, but am having trouble on how to tag nodes to achieve this. Can this be done using subgraphs? FYI, this is my first time playing with DOT. A: Yes, subgraphs will work. digraph { subgraph { rank = same; A0 }; subgraph { rank = same; B0; B1 }; root -> A0; A0 -> B0; root -> B1; } results in (source: brool.com)
doc_550
@Override public void onPause() { super.onPause(); shake.cancel(); } my phone can still vibrate although home button is pressed! I tried onStop(), same doesn't work.. my app is like this : countdown 10 sec, after that vibrate.. but the problem is onPause cannot be call so the user may feel where's the vibrate come from if it's set 2 minutes on the countdown ticker.. help! A: Since I can't see the rest of your code, I'm gonna assume a few things. Assumption #1 If you have your activity open, and the countdown starts and expires after 10 seconds, your phone vibrates (with your activity still open). If you go to home screen, the vibration stops. Assumption #2 You have your activity open, and the countdown starts. Before the 10 second expires, you go to home screen. Your activity is not visible, but the phone starts vibrating soon. If this is what you are seeing, it's the correct behavior. The problem is that in the 2nd case, your shake.cancel() from onPause() is called when you go to the home screen, before it actually starts vibrating. shake.cancel() can only cancel if it's already vibrating. If that's what you are trying to fix (I can only assume since I can't see the rest of your code), you can try this: private boolean mAllowShake = false; @Override public void onResume() { super.onResume(); mAllowShake = true; } @Override pulic void onPause() { super.onPause(); mAllowShake = false; shake.cancel(); } // wherever you are calling the shake.vibrate() if (mAllowShake) shake.vibrate(); This way, when your activity is not visible and your timer goes off, since mAllowShake is false, it won't actually vibrate. If that's not what you are trying to fix, please update your question with more code and description of your exact use case. Hope it helps!
doc_551
{ "authors": [ { "id": 1, "name": "Douglas Adams" }, { "id": 2, "name": "John Doe" } ], "books": [ { "name": "The Hitchhiker's Guide to the Galaxy", "author_id": 1 } ] } I would like to request the name of the author of "The Hitchhiker's Guide to the Galaxy". I've tried this JSON path but it doesn't work: $.authors[?(@.id == $.books[?(@.name == "The Hitchhiker's Guide to the Galaxy")].author_id)].name All online tools I tried indicate a syntax error which seems due to the presence of a JSON path inside my filter. Could anyone please help me figure out what's wrong and what is the right syntax? Thanks! A: When you running this filter $.books[?(@.name == "The Hitchhiker's Guide to the Galaxy")].author_id it returns an array instead of a value: [ 1 ] Syntax error occurs when you pass an array to compare with the value of id: $.authors[?(@.id == {the array value}].author_id)].name However, you may not be able to extract the value using JSONPath, depends on the language you are using. See Getting a single value from a JSON object using JSONPath
doc_552
myList([a,b,c,d,e]). I am trying to write a predicate. That predicate should give me this result: ab ac ad ae bc bd be cd ce de I found a solution that's near to my goal. But it is not exactly what I want. ?- L=[a,b,c], findall(foo(X,Y), (member(X,L),member(Y,L)), R). L = [a, b, c], R = [foo(a, a), foo(a, b), foo(a, c), foo(b, a), foo(b, b), foo(b, c), foo(c, a), foo(c, b), foo(..., ...)]. For example i dont want to aa or bb or cc. Also, there is already ac result. So i dont want to again ca. Sorry for my English. Thanks. A: ?- set_prolog_flag(double_quotes, chars). true. ?- List = "abcde", bagof(X-Y, Pre^Ys^( append(Pre, [X|Ys], List), member(Y,Ys) ), XYs). List = "abcde", XYs = [a-b,a-c,a-d,a-e,b-c,b-d,b-e,c-d,c-e,d-e]. ?- List = [A,B,C,D,E], bagof(X-Y, Pre^Ys^( append(Pre, [X|Ys], List), member(Y,Ys) ), XYs). List = [A,B,C,D,E], XYs = [A-B,A-C,A-D,A-E,B-C,B-D,B-E,C-D,C-E,D-E]. From your question it is not that evident what you want but it seems you are happy to use findall/3. Above solutions use bagof/3 which is a somewhat more civilized version of findall/3. bagof/3 takes into account variables, and thus you get the same result with concrete characters [a,b,c,d,e] or with a list of variables [A,B,C,D,E]. You have used terms foo(a,b), in such situations it is more common (and convenient) to say a-b. A: Here is another solution that does not need any of the higher-order predicates. :- set_prolog_flag(double_quotes, chars). :- use_module(library(double_quotes)). list_pairwise([], []). list_pairwise([E|Es], Fs0) :- phrase(values_key(Es, E), Fs0,Fs), list_pairwise(Es, Fs). values_key([], _K) --> []. values_key([V|Vs], K) --> [K-V], values_key(Vs, K). ?- list_pairwise("abcde", KVs). KVs = [a-b,a-c,a-d,a-e,b-c,b-d,b-e,c-d,c-e,d-e]. ?- list_pairwise(L, [a-b,a-c,a-d,a-e,b-c,b-d,b-e,c-d,c-e,d-e]). L = "abcde" ; false. ?- list_pairwise(L, [A-B,A-C,A-D,A-E,B-C,B-D,B-E,C-D,C-E,D-E]). L = [A,B,C,D,E] ; false. ?- KVs = [K1-_,K1-_,K2-_|_], dif(K1,K2), list_pairwise(Ks,KVs). KVs = [K1-K2,K1-_A,K2-_A], Ks = [K1,K2,_A], dif:dif(K1,K2) ; false. In the last query we show that a sequence starting with keys, K1, K1, K2 can only result in the sequence of three elements. A: What about a couple of predicates as follows ? printCouples(_, []). printCouples(E1, [E2 | T]) :- write(E1), write(E2), nl, printCouples(E1, T). printList([]). printList([H | T]) :- printCouples(H, T), printList(T). From printList([a, b, c, d]) you get ab ac ad bc bd cd
doc_553
Does anyone know how I might be able to do this with just HTML & CSS? Here is the HTML & CSS that I have as of right now: .header-menu { background-color: #fff; font-family: "Noto Sans", sans-serif; font-size: 18px; } .header-menu-items { display: none; z-index: 1; max-width: 100%; overflow-wrap: break-word; font-weight: 900; } .a { color: #000; padding: 10px; text-decoration: none; } .search { display: none; } .search-icon { display: none; } .header-menu-items hr { color: #d0d0d0; background-color: #d0d0d0; height: 1px; border: none; } .header-menu-btn { text-align: left; background-color: #ff3b3b; color: #fff; font-size: 20px; font-weight: 600; padding: 10px 0; border: none; cursor: pointer; width: 100%; margin-bottom: 10px; padding-left: 10px; } .header-menu:hover .header-menu-items { display: block; } <div className="header-menu"> <button className="header-menu-btn">Menu</button> <div className="header-menu-items"> <div> <a className="a" href=""> Shirts </a> <hr /> <a className="a" href=""> Pants </a> <hr /> <a className="a" href=""> Shoes </a> <hr /> <a className="a" href=""> T-shirts </a> <hr /> <a className="a" href=""> Accessories </a> <hr /> </div> </div> </div> A: Javascript onclick simulation using only css and html (without javascript) I know your answer is in the answer to this post but I can't really figure out how to do it myself. It would require changing your button to an input tag of checkbox type and going from there. A: First of all it has to be class and not className. What you want to do can be achieved with a checkbox and the sibling selector ~. .header-menu { background-color: #fff; font-family: "Noto Sans", sans-serif; font-size: 18px; } .header-menu-items { display: none; z-index: 1; max-width: 100%; overflow-wrap: break-word; font-weight: 900; } .a { color: #000; padding: 10px; text-decoration: none; } .search { display: none; } .search-icon { display: none; } .header-menu-items hr { color: #d0d0d0; background-color: #d0d0d0; height: 1px; border: none; } .header-menu-btn { text-align: left; background-color: #ff3b3b; color: #fff; font-size: 20px; font-weight: 600; padding: 10px 0; border: none; cursor: pointer; width: 100%; margin-bottom: 10px; padding-left: 10px; display: block; } #toggle-menu { display: none; } #toggle-menu:checked ~ .header-menu-items { display: block; } #toggle-menu ~ label .hide { display: none; } #toggle-menu:checked ~ label .show { display: none; } #toggle-menu:checked ~ label .hide { display: inline; } <div class="header-menu"> <input type="checkbox" id="toggle-menu"><label for="toggle-menu" class="header-menu-btn">Menu <span class="show">show</span><span class="hide">hide<span></label> <div class="header-menu-items"> <div> <a class="a" href=""> Shirts </a> <hr /> <a class="a" href=""> Pants </a> <hr /> <a class="a" href=""> Shoes </a> <hr /> <a class="a" href=""> T-shirts </a> <hr /> <a class="a" href=""> Accessories </a> <hr /> </div> </div> </div>
doc_554
@media screen and (max-device-width:360px){ div { background-color: green; } } @media screen and (min-device-width:361px){ div { background-color: blue; } } Doesn't work? If device-width is linked with screen.width shouldn't its value change accordingly with the actual device orientation just like screen.width does? A: device-width refers to the width of the device itself, in other words, the screen resolution of the device. Do you want something like this? /* Portrait */ @media screen and (device-width: 360px) and (device-height: 640px) and (-webkit-device-pixel-ratio: 3) and (orientation: portrait) { div { background-color: green; } } /* Landscape */ @media screen and (device-width: 360px) and (device-height: 640px) and (-webkit-device-pixel-ratio: 3) and (orientation: landscape) { div { background-color: blue; } }
doc_555
File starts with ----------------------------802523244934076832438189 Content-Disposition: form-data; name="file"; filename="Test1.png" Content-Type: image/png and Ends with ----------------------------802523244934076832438189-- My code is given below: var formData = { file:{ value: fs.createReadStream('./upload-folder/' + fileName), options: { filename: fileName, contentType: req.body.attachment.mimeType //mimeType from JSON } } }; var options = { url: config.deployment.incidentUrl + '/attachment?filename=' + fileName, method: "POST", headers: { ContentType: "application/json"}, json: true, formData: formData }; request(options, function (error, response, body) { if (error) { errorlog.error(`Error Message : PostAttachmentToCSMS : ${error}`); } else { successlog.info(`Attachment posted for correlation Id: ${corIdFromJira}`); } }); A: I think it's because using formData is mutually exclusive with using json: true: the data is either multipart/form-data encoded or JSON-encoded, but it can't be both. The Content-Type header also isn't correct (although it's misspelled so technically not the problem). Try this: var options = { url: config.deployment.incidentUrl + '/attachment?filename=' + fileName, method: "POST", formData: formData };
doc_556
File "/home/rajnish/anaconda3/envs/py3/lib/python3.5/site-packages/h5py/__init__.py", line 36, in <module> from ._conv import register_converters as _register_converters File "h5py/h5t.pxd", line 14, in init h5py._conv File "h5py/numpy.pxd", line 66, in init h5py.h5t AttributeError: module 'numpy' has no attribute 'dtype'
doc_557
My data.dat file looks like this: 12 4 6.1 7 14 4 8.4 62 7 56.1 75 98 9.7 54 12 35 2 4 8 7.8 To select 2 lines randomly from the data.dat here is how I am proceeding: close all; clear all; %----------------------% % Choose random lines %----------------------% M = load('data.dat'); N=2; %number_of_lines file_number = 2; %save each two lines in new file: selection_1, selection_2 Now I am saving the two selected lines in new files sequentially. for k = 1:file_number i = randi(length(M),N); B=M(i,:) filename=['samples_',int2str(k),'_mc.dat'] save (filename, 'B', '-ascii') clear B; end I don't know why but I have more than 2 lines in each new file. Could you please explain where I made a mistake? A: I think you are making a mistake when you generate the random numbers, as indicated by GameOfThrows. i = randi(length(M),N); % gives you a matrix NxN of numbers i = randi(length(M),[N,1]); % gives you a column of N numbers
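One more note, as a small sketch with the same variable names: if the two selected rows should also be distinct, randperm avoids the duplicate indices that randi can produce.

i = randperm(size(M, 1), N);   % N distinct row indices between 1 and the number of rows
B = M(i, :);                   % exactly N different lines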
doc_558
Added angular js script and percent-circle-directive script as mentioned here https://www.npmjs.com/package/angular-percent-circle-directive Here is my html code - <!DOCTYPE html> <html> <head> <title>My dashboard</title> <script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.6.4/angular.min.js"></script> <link rel="stylesheet" type="text/css" href="C:\Users\Shantanu\node_modules\angular-percent-circle-directive\dist\percent-circle.css"> <link rel="stylesheet" type="text/css" href="css/bootstrap.css"> <link rel="stylesheet" type="text/css" href="css/main.css"> </head> <body> <script src="C:\Users\Shantanu\node_modules\angular-percent-circle- directive\dist\percent-circle-directive.js" type="text/javascript"></script> <script type="text/javascript" src="ang.js"></script> <script src="https://code.jquery.com/jquery-3.2.1.js" integrity="sha256-DZAnKJ/6XZ9si04Hgrsxu/8s717jcIzLy3oi35EouyE=" crossorigin="anonymous"></script> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384- Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script> <nav class="navbar navbar-default navbar-fixed-top"> <div class="container-fluid"> <!-- Brand and toggle get grouped for better mobile display --> <div class="container"> <div class="navbar-header"> <button type="button" class="navbar-toggle collapsed" data- toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria- expanded="false"> <span class="sr-only">Toggle navigation</span> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </button> <img src="vx-medium.jpg"><a class="navbar-brand" href="#"><strong> VenturX: Prototype</strong></a> </div> <!-- Collect the nav links, forms, and other content for toggling --> <div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1"> <ul class="nav navbar-nav navbar-right"> <li><a href="#"><strong>Hello, [email protected]</strong></a></li> <li><a href="login.html">Log out</a></li> </ul> </div> </div><!-- /.navbar-collapse --> </div><!-- /.container-fluid --> </nav> <div class="img"> <img src="person_male.jpg" class="person"> </div> <div id="content"> <table> <tr> <td><strong>Market</strong></td> <td class="tc">Fill in who are your customers?</td> </tr> <tr> <td><strong>Pain</strong></td> <td class="tc">What is the pain statement you are trying to solve for them?</td> </tr> <tr> <td><strong>Benefit</strong></td> <td class="tc">What is the benefit that will match that pain statement?</td> <td class="editpro"><a class="btn btn-default" href="#" role="button">Edit Profile</a></td> </tr> </table> </div> <div class="title"> <span class="bigf">How is <strong>VenturX <span class="glyphicon glyphicon- pencil" aria-hidden="true"></strong> doing today?</span></span><span class="checkd"> Check dashboard</span> </div> <percent-circle percent=50 class="cir">HighS</percent-circle> <!-- <p class="bg-warning"><strong>Small conversion?</strong> Learn what you can do to <strong><span class="bl">rise your conversion</span></strong>.</p> <p class="bg-danger"><strong>Low product score?<span class="bl"> Try this</span></strong> to get more ...</p> --></body> </html> My percent-circle is not working at all. I've tried increasing the width and height of the circle but its still not working. Its complete blank. The text inside it is showing up but the circle is not coming up. Where can be the issue ? 
A: script src="C:\Users\Shantanu\node_modules\angular-percent-circle-directive\dist\percent-circle-directive.js" type="text/javascript" This Script of your here, i haven't tested if it can be included this way, but as far as know, this is not the correct war to do it, as angular project when complied, it will gather everything from your src/app directory. If you are installing it with npm, you can use it in angular by: const package-name = require('package-name'); And to use require, do follow these steps: * *run npm i --save-dev @types/node *in you app.modules.ts, write the line: declare let require: any;. *and finally, in your tsconfig.app.json file, write {"extends": "../tsconfig.json","compilerOptions": {"types": ["node"], "typeRoots": [ "../node_modules/@types" ] }} And then include the script file from the imported package. A: All I had to do is add <script type="text/javascript"> var app = angular.module('percentCircleDemo', ['percentCircle-directive']); </script> and add <body ng-app="percentCircleDemo"> I didn't know earlier that to run angular js we need to mention ng-app somewhere in the html code.
doc_559
new Schema({ ... createDate: { type: Date, default: Date.now }, updateDate: { type: Date, default: Date.now } }); Upsert operation: const upsertDoc = { ... } Model.update({ key: 123 }, upsertDoc, { upsert: true }) when I upsert with update or findOneAndUpdate the default schema values createDate and updateDate are always renewed no matter document is inserted or updated. It's same when I use $set (in which of course I don't pass dates). I don't seem to find anything to tell if it's an expected behavior. I expect dates to be added only on insert and not update, unless explicitly set. A: Well, I'd always recommend to use the provided and recommended way to manage createdAt and updatedAt by mongoose. Simply by passing timeStamp: true as schema options. This is always a best practice and lets you not to be worried about such behaviors. I use it and I never see a problem with timestamps using update or findOneAndUpdate. Here is how you use it new Schema({ ... //Your schema },{ timestamps: true}) A: If you are looking for "proof" of the expected behavior, then look no further than the source code itself. Particularly within the schema.js main definition: updates.$setOnInsert = {}; updates.$setOnInsert[createdAt] = now; } return updates; }; this.methods.initializeTimestamps = function() { if (createdAt && !this.get(createdAt)) { this.set(createdAt, new Date()); } if (updatedAt && !this.get(updatedAt)) { this.set(updatedAt, new Date()); } return this; }; this.pre('findOneAndUpdate', _setTimestampsOnUpdate); this.pre('update', _setTimestampsOnUpdate); this.pre('updateOne', _setTimestampsOnUpdate); this.pre('updateMany', _setTimestampsOnUpdate); } function _setTimestampsOnUpdate(next) { var overwrite = this.options.overwrite; this.update({}, genUpdates(this.getUpdate(), overwrite), { overwrite: overwrite }); applyTimestampsToChildren(this); next(); } So there you can see all the 'pre' middleware handlers being registered for each of the "update" method variants and to the same functional code. These all essentially modify the $set operator in any "update" you issue to include the updatedAt field, or whatever name you mapped to that key in the schema options. The actual statement sent with "upsert" actions uses $setOnInsert for the createdAt field or mapped option name ( see the top of the listing ). This action only applies when an "upsert" actually occurs, so documents that exist and are merely matches for any of the "update" methods are never actually touched by this value. Those operators are part of how MongoDB works and not really to do with mongoose, but the code shown here shows how mongoose "adjusts" your "update" actions in order to include these additional operations. For reference the whole main function in schema.js which works out what to apply currently begins at Line #798 for the genUpdates() function as called in the bottom part of the listing shown here yet the top part is the last few lines of that function where the keys of $setOnInsert get defined. So in summary, YES every "update" action is intentional that the updatedAt mapped field has the current Date value assigned, and also that the "updates" are modified to include the $setOnInsert action which only applies when a new document is created as the result of an "upsert" action for the createdAt mapped field.
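Tying the two answers together, a minimal sketch of the schema-level option (the createdAt/updatedAt keys can be mapped onto the field names from the question):

new Schema({
  // ... your fields, no manual date fields needed
}, {
  // createdAt is written only when the document is first inserted (including upserts),
  // updatedAt is refreshed on every save/update
  timestamps: { createdAt: 'createDate', updatedAt: 'updateDate' }
});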
doc_560
a string that has a minimum of 8 characters, including at least 2 capital letters and 1 number. Please help, thanks. A: This article could help you build one: (?=.{8,})(?=(?:.*[A-Z]){2})(?=.*[0-9]) A: This might do the trick for your specific case: ^(?=.{8})(?=.*[A-Z].*[A-Z])(?=.*\d).*$
doc_561
But instead of dots, I want to use a picture of a car with the background cut out. A little line of cars all the way around the box. How can I do that? Here's my attempt: .box-head { padding: 10px; margin: 0px auto; width: 50%; border: 5px; border-radius: 10px; text-align: center; background: linear-gradient(to right, #00e6ff, #6418ff); font-family: cursive; } <!DOCTYPE html> <html> <head> <title>Skyline's Pointless Website</title> <link rel="border" href"border.png"> </head> <body> <div class="box-head"> <h1>Welcome fello plebs to this completely pointless website.</h1> <p> You may notice this website isn't that good. I just started learning html and css so leave me alone.</p> </div> </body> </html> Here's the car image: A: Here's an example using border-image: The border-image CSS property lets you draw an image in place of an element's border-style. .box-head { padding: 42px; width: 60%; text-align: center; font-family: cursive; border-image-source: url("//mdn.mozillademos.org/files/6017/border-image-6.svg"); border-image-slice: 42 fill; border-image-width: 42px; border-image-repeat: round; } h1 { font-size: 16px; } p { font-size: 12px; } <div class="box-head"> <h1>Welcome fello plebs to this completely pointless website.</h1> <p> You may notice this website isn't that good. I just started learning html and css so leave me alone.</p> </div> Using an online generator might help demonstrate how it works: MDN border-image.com Also see: css-tricks.com bitsofco.de thenewcode.com Edit You explained that you want a line of cars around the box. To do this with border-image, make an image with the car tiled in a 3x3 grid. The different "zones" are described in documentation for border-image-slice: * *Zones 1-4 are corner regions. Each one is used a single time to form the corners of the final border image. *Zones 5-8 are edge regions. These are repeated, scaled, or otherwise modified in the final border image to match the dimensions of the element. *Zone 9 is the middle region. It is discarded by default, but is used like a background image if the keyword fill is set. Here's my example image: Here's a working example of the border: .box-head { padding: 30px 100px; width: 60%; text-align: center; font-family: cursive; border-style: solid; border-image-source: url("//i.stack.imgur.com/ODGdz.png"); border-image-slice: 30 100; border-image-width: 30px 100px; border-image-outset: 0; border-image-repeat: round; } h1 { font-size: 16px; } p { font-size: 12px; } <div class="box-head"> <h1>Welcome fello plebs to this completely pointless website.</h1> <p> You may notice this website isn't that good. I just started learning html and css so leave me alone.</p> </div> For more reference on this method, see: Making a Border of a Single Repeating Image Border Imaging EDIT Here's an example with the cars rotated: .box-head { padding: 30px 50px; width: 60%; text-align: center; font-family: cursive; border-style: solid; border-image-source: url("//i.stack.imgur.com/YQ4EO.png"); border-image-slice: 30 100; border-image-width: 30px 100px; border-image-outset: 0; border-image-repeat: round; } h1 { font-size: 20px; } p { font-size: 16px; } <div class="box-head"> <h1>Welcome, humans, to this amazing website.</h1> <p> You may notice this website tickles your brain. I just started learning HTML and CSS, so the universe is wide open!</p> </div>
doc_562
* *are all service fabric service instances single-threaded? I created a stateless web api, one instance, with a method that did a Task.Delay, then returned a string. Two requests to this service were served one after the other, not concurrently. So am I right in thinking then that the number of concurrent requests that can be served is purely a function of the service instance count in the application manifest? Edit Thinking about this, it is probably to do with the set up of OWIN Wep Api. Could it be it is blocking by session? I assumed there is no session by default? *I have long-running operations that I need to perform in service fabric (that can take several hours). Is there a recommended pattern that I can use for this in service fabric? These are currently handled using a storage queue that triggers a webjob. Maybe something with Reliable Queues and a RunAsync loop? A: It seems you handled the first part so I will comment on the second part: "long-running operations". We can see long running operations / workflows being handled far before service fabric came about. For this reason, we can build on the shoulders of giants by looking on the design patterns that software experts have been using for decades. For example, the famous and all inclusive Process Manager. Mind you that this pattern is sometimes an overkill. If it is in your case, just check out the rest of the related patterns in the Enterprise Integration Patterns book (by Gregor Hohpe). As for the use of reliable collections, those are implementation details when choosing a data structure supporting the chosen design pattern. I hope that helps A: With regards to your second point - It really depends on the nature of your long running task. Is your long running task the kind of workload that runs on an isolated thread that depends on local OS/VM level resources and eventually comes back with a result (A)? or is it the kind of long running task that goes through stages and builds up a model of the result through a series of persisted state changes (B)? From what I understand of Service Fabric, it isn't really designed for running long running workloads (A), but more for writing horizontally-scalable, highly-available systems. If you were absolutely keen on using service fabric (and your kind of workload tends to be more like B than A) I would definitely find a way to break down those long running tasks that could be processed in parallel across the cluster. But even then, there is probably more appropriate technologies designed for this such as Azure Batch? P.s. If you are going to put a long running process in the RunAsync method, you should design the workload so it is interruptable and its state can be persisted in a way that can be resumed from another node in the cluster In a stateful service, only the primary replica has write access to state and thus is generally when the service is performing actual work. The RunAsync method in a stateful service is executed only when the stateful service replica is primary. The RunAsync method is cancelled when a primary replica's role changes away from primary, as well as during the close and abort events. P.s.s Long running operations are the devil when trying to write scalable systems. Try and tackle that now and save yourself the future pain if possibe. A: To the first point - this is purely a client issue. Chrome saw my requests as indentical and so delayed the 2nd request until the 1st got a response. Varying the parameter of the requests allowed them to be served concurrently.
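On the second point (long-running work driven from RunAsync), here is a rough, hypothetical C# sketch of a cancellation-aware loop over a reliable queue; the queue name and ProcessStepAsync are invented, and retries/error handling are glossed over:

protected override async Task RunAsync(CancellationToken cancellationToken)
{
    var queue = await StateManager.GetOrAddAsync<IReliableQueue<string>>("workItems");

    while (true)
    {
        // honour cancellation so a primary swap or shutdown interrupts the work cleanly
        cancellationToken.ThrowIfCancellationRequested();

        using (var tx = StateManager.CreateTransaction())
        {
            var item = await queue.TryDequeueAsync(tx);
            if (item.HasValue)
            {
                // do one small, resumable unit of work and persist its progress
                await ProcessStepAsync(item.Value, tx, cancellationToken);
            }
            await tx.CommitAsync();
        }

        await Task.Delay(TimeSpan.FromSeconds(1), cancellationToken);
    }
}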
doc_563
@Html.RadioButtonForSelectList(model => model.ViewModelForThingCreate.ThingTypeID, Model.ViewModelForCarCreate.CarTypeSelectList)

and:

public static MvcHtmlString RadioButtonForSelectList<TModel, TProperty>(this HtmlHelper<TModel> HTMLHelper, Expression<Func<TModel, TProperty>> Expression, IEnumerable<SelectListItem> ListOfValues)
{
    var MetaData = ModelMetadata.FromLambdaExpression(Expression, HTMLHelper.ViewData);
    var SB = new StringBuilder();
    if (ListOfValues != null)
    {
        foreach (SelectListItem Item in ListOfValues)
        {
            var ID = string.Format("{0}_{1}", MetaData.PropertyName, Item.Value);
            var Radio = HTMLHelper.RadioButtonFor(Expression, Item.Value, new { id = ID }).ToHtmlString();
            SB.AppendFormat("<label class=\"radio inline\" for=\"{0}\">{1} {2}</label>", ID, Radio, HttpUtility.HtmlEncode(Item.Text));
        }
    }
    return MvcHtmlString.Create(SB.ToString());
}

Thanks!

A: This is your custom helper with the buttons set to not checked. Try this; I assume it will render all radio buttons unchecked.

public static MvcHtmlString RadioButtonForSelectList<TModel, TProperty>(this HtmlHelper<TModel> HTMLHelper, Expression<Func<TModel, TProperty>> Expression, IEnumerable<SelectListItem> ListOfValues)
{
    var MetaData = ModelMetadata.FromLambdaExpression(Expression, HTMLHelper.ViewData);
    var SB = new StringBuilder();
    if (ListOfValues != null)
    {
        foreach (SelectListItem Item in ListOfValues)
        {
            var ID = string.Format("{0}_{1}", MetaData.PropertyName, Item.Value);
            var Radio = HTMLHelper.RadioButtonFor(Expression, Item.Value, new { id = ID }).ToHtmlString();
            SB.AppendFormat("<label class=\"radio inline\" checked=\"false\" for=\"{0}\">{1} {2}</label>", ID, Radio, HttpUtility.HtmlEncode(Item.Text));
        }
    }
    return MvcHtmlString.Create(SB.ToString());
}

A: Unfortunately, one radio button must always be checked. That is the unfortunate part about radio buttons; however, you could always add a hidden radio button to your form and set its checked property to true, and have your internal code accept a null (or whatever you expect) if nothing is selected. Try setting all of the radio buttons' values to unchecked or false:

foreach (var button in ButtonGroup)
{
    button.Checked = false;
}

A: I've just tried your method in an MVC 3 template and it seems to work fine for me. Basically I've created a model:

public class IndexModel
{
    public string ID;
    public IEnumerable<SelectListItem> Elements;
}

Then created an instance and filled in the values:

var model = new IndexModel()
{
    ID = "a",
    Elements = new List<SelectListItem>() {
        new SelectListItem() { Text = "test1", Value = "1"},
        new SelectListItem() { Text = "test2", Value = "2"}}
};

In the view I've used your extension method:

<form>
    @(Extensions.RadioButtonForSelectList(Html, x => x.ID, Model.Elements))
    <button type="reset">Reset</button>
</form>

All seems perfectly fine after launch. Fields are not selected at load and they're cleared after pressing the "Reset" button. Can you give some more details, as I'm not sure I fully understand what you are trying to achieve :-)

EDIT: Here's an example of radio buttons in plain HTML. They're definitely not filled at the beginning, and if you want them to be required, add required; but by default you can send the form without selecting any radio button. Also you can make one checked by adding checked, as in the second example. Are you using some JavaScript on the client side? Maybe it is causing this side-effect? http://jsbin.com/isadun/1
doc_564
Use <- read_csv("FinalData.csv") %>%
  filter(Week %between% c("1", "7")) %>%
  dplyr::select(-c(LactationNumber, DaysPerWeek, TotalWeeks)) %>%
  mutate_at(vars(Farm, Parity, CowNr, Week, Failures_Avg), as.factor) %>%
  mutate_at(vars(Milk_AvgDay, Milk_AvgVisit, IntervalCV), as.numeric) %>%
  group_by(Farm, CowNr)

Here I read in Failures_Avg as a categorical variable, but since it is continuous there are about 65 levels. I am trying to recode it into:

Low - equal to 0
Med - greater than 0, up to and including 1
High - greater than 1

Thank you in advance

A: We can use cut

library(dplyr)
Use %>%
   mutate(Failures_Avg = cut(Failures_Avg, breaks = c(-Inf, 0, 1, Inf),
         labels = c("Low", "Med", "High")))

or case_when

Use <- Use %>%
    mutate(Failures_Avg = case_when(Failures_Avg == 0 ~ "Low",
          (Failures_Avg > 0 & Failures_Avg <= 1) ~ "Med",
           TRUE ~ "High"))
doc_565
A: I got it finally :-) In case someone needs it:

var properties = new Property[1];
var hidden = new Property { Name = "Hidden", Value = "True" };
properties[0] = hidden;
Warning[] warnings = ReportingService.CreateReport(fileNameWithoutExtension, ssrsFolder, true, fileContents, properties);
doc_566
VERBOSE: Key: 'HKEY_CURRENT_USER\SOFTWARE\blahblah\ProgId' Key : 'HKEY_CURRENT_USER\SOFTWARE\blahblah\ProgId' Reason : ValueData Details : {'foo.Class1'} VERBOSE: Key: 'HKEY_CURRENT_USER\SOFTWARE\blahblah\Version' VERBOSE: Key: 'HKEY_CURRENT_USER\SOFTWARE\blahblah\VersionIndependentProgID' Key : Reason : Details : I can't figure out what's causing the blank lines. My script tests to make sure that the item is not null (using if ($key)), but that line is ignored anyway. Similarly, I understand that old versions of PowerShell didn't test for null, but I'm using PS 5. The script is here (minus the synopsis and description, which I can't get to format correctly here): [CmdletBinding()] Param( [Parameter(Mandatory, Position=0, ValueFromPipelineByPropertyName)] [Alias("PsPath")] # Registry path to search [string[]] $Path, # Specifies whether or not all subkeys should also be searched [switch] $Recurse, [Parameter(ParameterSetName="SingleSearchString", Mandatory)] # A regular expression that will be checked against key names, value # names, and value data (depending on the specified switches) [string] $SearchRegex, [Parameter(ParameterSetName="SingleSearchString")] # When the -SearchRegex parameter is used, this switch means that key # names will be tested (if none of the three switches are used, keys # will be tested) [switch] $KeyName, [Parameter(ParameterSetName="SingleSearchString")] # When the -SearchRegex parameter is used, this switch means that the # value names will be tested (if none of the three switches are used, # value names will be tested) [switch] $ValueName, [Parameter(ParameterSetName="SingleSearchString")] # When the -SearchRegex parameter is used, this switch means that the # value data will be tested (if none of the three switches are used, # value data will be tested) [switch] $ValueData, [Parameter(ParameterSetName="MultipleSearchStrings")] # Specifies a regex that will be checked against key names only [string] $KeyNameRegex, [Parameter(ParameterSetName="MultipleSearchStrings")] # Specifies a regex that will be checked against value names only [string] $ValueNameRegex, [Parameter(ParameterSetName="MultipleSearchStrings")] # Specifies a regex that will be checked against value data only [string] $ValueDataRegex ) Begin { switch ($PSCmdlet.ParameterSetName) { SingleSearchString { $NoSwitchesSpecified = -not ( $PSBoundParameters.ContainsKey("KeyName") -or $PSBoundParameters.ContainsKey("ValueName") -or $PSBoundParameters.ContainsKey("ValueData") ) if ($KeyName -or $NoSwitchesSpecified) { $KeyNameRegex = $SearchRegex } if ($ValueName -or $NoSwitchesSpecified) { $ValueNameRegex = $SearchRegex } if ($ValueData -or $NoSwitchesSpecified) { $ValueDataRegex = $SearchRegex } } MultipleSearchStrings { # No extra work needed } } } Process { $sw = [Diagnostics.Stopwatch]::StartNew() foreach ($CurrentPath in $Path) { Write-Verbose ("CurrentPath: {0}" -f $CurrentPath) try { Get-ChildItem $CurrentPath -Recurse:$Recurse | ForEach-Object { $Key = $_ Write-Verbose ("Key: '{0}'" -f $Key.Name) if ($Key) { if ($KeyNameRegex) { #Write-Verbose ("{0}: Checking KeyNameRegex" -f $Key.Name) if ($Key.PSChildName -match $KeyNameRegex) { #Write-Verbose(" -> Match found! 
Key.PSChildname = {0}" -f $Key.PSChildName) return [PSCustomObject] @{ Key = "'" + $Key + "'" Reason = "KeyName" Details = $Key.PSChildName } } } if ($ValueNameRegex) { #Write-Verbose ("{0}: Checking ValueNameRegex" -f $Key.Name) if ($Key.GetValueNames() -match $ValueNameRegex) { $MatchingItem = ($Key.GetValueNames() | Select-String -Pattern $ValueNameRegex) #Write-Verbose(" -> Matching value name found! MatchingItem = {0}" -f $MatchingItem) return [PSCustomObject] @{ Key = "'" + $Key + "'" Reason = "ValueName" Details = $MatchingItem } } } if ($ValueDataRegex) { #Write-Verbose ("{0}: Checking ValueDataRegex" -f $Key.Name) if (($Key.GetValueNames() | % { $Key.GetValue($_) }) -match $ValueDataRegex) { $MatchingItems = ($Key.GetValueNames() | % { $Key.GetValue($_) } | Where {$_ -match $valueDataRegex}) $NewList = @() foreach ($Item in $MatchingItems) { $NewList += "'$Item'" } #Write-Verbose(" -> Matching value data found! NewList = {0}" -f $NewList) return [PSCustomObject] @{ Key = "'" + $Key + "'" Reason = "ValueData" Details = $NewList } } } } } } catch { Write-Warning "Failure for $CurrentPath" Write-Warning "Details: $($_.Exception.Message)" } } # end foreach $sw.Stop() Write-Output ("Seconds elapsed: {0}" -f $sw.Elapsed.TotalSeconds) } and a sample invocation is here: search-registry-detailed.ps1 -Path 'HKCU:\SOFTWARE\Classes\Wow6432Node\CLSID' -recurse -SearchRegex "foo" -ErrorAction SilentlyContinue -Verbose | Select Key,Reason,Details | fl
doc_567
<script type="text/javascript"> var myDate=new Date(); myDate.setFullYear(2015,2,14); var today = new Date(); if (myDate>today) { document.body.style.display = "none"; } </script> A: The simplest approach I could think of: var myDate = new Date(), today = new Date(), // create an element to contain your content: note = document.createElement('h1'); // set the content of that created-element: note.textContent = 'Test Over'; // remember that, in JavaScript, months are zero-indexed, // 0 = January, 1 = February (you were originally testing // against a date in March): myDate.setFullYear(2015, 1, 14); if (myDate > today) { // while there's any content in the <body>: while (document.body.firstChild){ // we remove that content, each time removing the // firstChild: document.body.removeChild(document.body.firstChild); } // appending the created-element: document.body.appendChild(note); } var myDate = new Date(), today = new Date(), note = document.createElement('h1'); note.textContent = 'Test Over'; myDate.setFullYear(2015, 1, 14); if (myDate > today) { while (document.body.firstChild){ document.body.removeChild(document.body.firstChild); } document.body.appendChild(note); } <h1>This text should not be visible</h1> References: * *Date() constructor. *Date.prototype.setFullYear(). *document.createElement(). *Node.appendChild(). *Node.firstChild. *while(). A: if you want to hide the body After the designated date, then you need to check if your date is smaller than today meaning if it passed, not the other way around var myDate = new Date(); myDate.setFullYear(2015, 1, 11); var today = new Date(); if (myDate < today) { // this will just replace the body's html with your text document.body.innerHTML="some text"; } div { display: inline-block; width: 200px; height: 200px; } .beforeDate { background: red; } .afterDate { background: cyan; } <div class="afterDate">This one will be hidden after the date</div> UPDATE: if you want to put your script in the head, you need to make sure it doesnt run before the elements in the body are created, so you wont try to alter objects that do not exist yet. you can do it by attaching a custom handler to the document's load event, which will fire when the DOM is loaded and there you put your script: document.addEventListener("load", function () { var myDate = new Date(); myDate.setFullYear(2015, 1, 11); var today = new Date(); if (myDate < today) { // this will just replace the body's html with your text document.body.innerHTML = "some text"; } },true);
doc_568
Any help here / any tips you can offer would be appreciated. <?php $name_error = ""; $name = ""; if ($_SERVER["REQUEST_METHOD"] == "POST") { if (empty($_POST["name"])) { $name_error = "Whoops! Please enter your full name"; } else { $name = test_input($_POST["name"]); if (!preg_match("/^[a-zA-Z ]*$/",$name)) { $name_error = "Whoops! Only letters and white spaces are allowed in a name"; } } if ($name_error == ''){ $message_body = ''; unset($_POST['submit']); foreach ($_POST as $key => $value){ $message_body .= "$key: $value\n"; } $to = '[email protected]'; $subject = 'Contact Us Form Submission'; if (mail($to, $subject, $message_body )){ $success = "Your message has been sent. A representative will be in contact with you shortly."; $name = $email = $phone = $message = $inquiry = $hear = $success = ''; } } function test_input($data) { $data = trim($data); $data = stripslashes($data); $data = htmlspecialchars($data); return $data; } ?> A: Your "mail" logic needs to sit within the server request method, an easy fix would be to duplicate your if logic to be wrapped around your mailing logic. i.e. if($_SERVER["REQUEST_METHOD"] == "POST"){ if (mail($to, $subject, $message_body )){ $success = "Your message has been sent. A representative will be in contact with you shortly."; $name = $email = $phone = $message = $inquiry = $hear = $success = ''; } } Of course, you may as well just remove your current if statements closing bracket, and chuck it at the end of your code (as theres no need to break the if statement so early) i.e. <?php $name_error = ""; $name = ""; if ($_SERVER["REQUEST_METHOD"] == "POST") { if (empty($_POST["name"])) { $name_error = "Whoops! Please enter your full name"; } else { $name = test_input($_POST["name"]); if (!preg_match("/^[a-zA-Z ]*$/",$name)) { $name_error = "Whoops! Only letters and white spaces are allowed in a name"; } } if ($name_error == ''){ $message_body = ''; unset($_POST['submit']); foreach ($_POST as $key => $value){ $message_body .= "$key: $value\n"; } $to = '[email protected]'; $subject = 'Contact Us Form Submission'; if (mail($to, $subject, $message_body )){ $success = "Your message has been sent. A representative will be in contact with you shortly."; $name = $email = $phone = $message = $inquiry = $hear = $success = ''; } } // <--- ADD CLOSING BRACKET HERE function test_input($data) { $data = trim($data); $data = stripslashes($data); $data = htmlspecialchars($data); return $data; }
doc_569
val daggerWorkerFactory = DaggerWorkerFactory(toInjectInWorker)
val configuration = Configuration.Builder()
    .setWorkerFactory(daggerWorkerFactory)
    .build()
WorkManager.initialize(context, configuration)

After this code executes, I can get the WorkManager instance:

val workManager = WorkManager.getInstance()

The problem is that for every worker created after this point, my factory is never used. The default factory is used instead. I can see in the API documentation that the method "WorkManager.initialize" has a note:

Disable androidx.work.impl.WorkManagerInitializer in your manifest

I cannot find any information on how to do this. Was this in some older version of WorkManager and they forgot to remove it from the documentation, or is this really necessary? If so, how?

A: From the documentation of WorkManager.initialize():

By default, this method should not be called because WorkManager is automatically initialized. To initialize WorkManager yourself, please follow these steps: Disable androidx.work.impl.WorkManagerInitializer in your manifest. In Application#onCreate or a ContentProvider, call this method before calling getInstance()

So what you need is to disable WorkManagerInitializer in your Manifest file:

<application
    //...
    android:name=".MyApplication">
    //...
    <provider
        android:name="androidx.work.impl.WorkManagerInitializer"
        android:authorities="your-packagename.workmanager-init"
        android:enabled="false"
        android:exported="false" />
</application>

And in your custom Application class, initialize your WorkManager:

class MyApplication : Application() {

    override fun onCreate() {
        super.onCreate()
        val daggerWorkerFactory = DaggerWorkerFactory(toInjectInWorker)
        val configuration = Configuration.Builder()
            .setWorkerFactory(daggerWorkerFactory)
            .build()
        WorkManager.initialize(this, configuration)
    }
}

Note: By default, WorkManager will add a ContentProvider called WorkManagerInitializer with authorities set to your-packagename.workmanager-init. If you pass the wrong authorities in your Manifest file while disabling the WorkManagerInitializer, Android will not be able to compile your manifest.
doc_570
My aim is to split my scan via row key and distribute a set of rows each to a map job. So far I have only been able to define a scan where my mappers always get one row at a time. But that is not what I want - I need the map input set-wise. So is there a possibility to split up my HBase table (or rather the scan) into n sets of rows, which are then the input for n mappers? I am not looking for a solution that starts a MapReduce job writing n files and another MapReduce job reading them back again as text input to get these sets. Thanks in advance!

A: Mappers will always get one row at a time - that's the way map-reduce works. If you want to relate multiple rows on the map side, you can either do that yourself (e.g. using some static variables etc.) or write the logic as a combiner, which is a map-side "reduce" step. Note that you'd still need a reducer to handle the edge cases where related keys were handled by different mappers - since in HBase keys are ordered on disk, you'd only get that at the end/beginning of a split. You can reduce the risk of this happening by pre-splitting.

A: Looking into the implementation I saw that calling the map step with one scan results in exactly one mapper being used. This is why the input set is not split at all. Using a list of scans, giving it to the TableMapReduceUtil.initTableReducerJob function, the input set is split at each scan. Thereby one can define the number of mappers used in the MapReduce job. Another way would be to extend the TableInputFormat class and rewrite the split method. As Arnon Rotem-Gal-Oz said correctly, one can only access one row at a time within the mapper's map function.
doc_571
I'm trying to understand the input & output parameters for the given Conv2d code:

import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

net = Net()

My conv2d() understanding (please correct me if I am wrong/missing anything):

* *since the image has 3 channels, the first parameter is 3; 6 is the number of filters (randomly chosen)
*5 is the kernel size (5, 5) (randomly chosen)
*likewise we create the next layer (the previous layer's output is the input of this layer)
*Now creating a fully connected layer using the linear function: self.fc1 = nn.Linear(16 * 5 * 5, 120)

16 * 5 * 5: here 16 is the output of the last conv2d layer, but what is 5 * 5 in this? Is this the kernel size, or something else? How do I know whether we need to multiply by 5*5 or 4*4 or 3*3?

I researched & got to know that since the image size is 32*32 and we apply max pool(2) two times, the image size would be 32 -> 16 -> 8, so we should multiply by last_output_size * 8 * 8. But in this link it's 5*5. Could anyone please explain?

A: These are the dimensions of the image size itself (i.e. Height x Width).

Unpadded convolutions

Unless you pad your image with zeros, a convolutional filter will shrink the size of your output image by filter_size - 1 across the height and width: a 3x3 filter takes a 5x5 image to a (5-(3-1)) x (5-(3-1)) = 3x3 image.

Zero padding preserves image dimensions

You can add padding in PyTorch by setting Conv2d(padding=...).

Chain of transformations

Since it has gone through:

Layer                                   Shape Transformation
one conv layer (without padding)        (h, w) -> (h-4, w-4)
a MaxPool                               -> ((h-4)//2, (w-4)//2)
another conv layer (without padding)    -> ((h-8)//2, (w-8)//2)
another MaxPool                         -> ((h-8)//4, (w-8)//4)
a Flatten                               -> ((h-8)//4 * (w-8)//4)

We go from the original image size of (32,32) to (28,28) to (14,14) to (10,10) to (5,5) to (5x5).

To visualise this you can use the torchsummary package:

from torchsummary import summary
input_shape = (3,32,32)
summary(Net(), input_shape)

----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1            [-1, 6, 28, 28]             456
         MaxPool2d-2            [-1, 6, 14, 14]               0
            Conv2d-3           [-1, 16, 10, 10]           2,416
         MaxPool2d-4             [-1, 16, 5, 5]               0
            Linear-5                  [-1, 120]          48,120
            Linear-6                   [-1, 84]          10,164
            Linear-7                   [-1, 10]             850
================================================================
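Not part of the original answer, but a quick way to double-check the shape arithmetic above is to push a dummy batch through the layers one at a time and print the shapes. This is a minimal sketch using only the layers already defined in the question's Net:

import torch
import torch.nn as nn

conv1 = nn.Conv2d(3, 6, 5)
pool = nn.MaxPool2d(2, 2)
conv2 = nn.Conv2d(6, 16, 5)

x = torch.randn(1, 3, 32, 32)   # one fake 32x32 RGB image
x = conv1(x)
print(x.shape)                  # torch.Size([1, 6, 28, 28])
x = pool(x)
print(x.shape)                  # torch.Size([1, 6, 14, 14])
x = conv2(x)
print(x.shape)                  # torch.Size([1, 16, 10, 10])
x = pool(x)
print(x.shape)                  # torch.Size([1, 16, 5, 5])
x = x.view(-1, 16 * 5 * 5)
print(x.shape)                  # torch.Size([1, 400]), which is what fc1 expects

If the input were a different size, the printed shapes would immediately show what the first Linear layer's in_features needs to be.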
doc_572
I am pretty sure this is because modules on my master are not visible to the worker node. I tried importing numpy but it didn't work, even though I have numpy installed on my worker through anaconda. I have anaconda installed on both master and worker in the same way. However, following Josh Rosen's advice I made sure that I installed the libraries on the worker nodes. https://groups.google.com/forum/#!topic/spark-users/We_F8vlxvq0

However, I still seem to be getting issues, including the fact that my worker does not recognize the command abs, which is standard in Python 2.6.

The code I am running is from this post: https://districtdatalabs.silvrback.com/getting-started-with-spark-in-python

def isprime(n):
    """ check if integer n is a prime """
    # make sure n is a positive integer
    n = abs(int(n))
    # 0 and 1 are not primes
    if n < 2:
        return False
    # 2 is the only even prime number
    if n == 2:
        return True
    # all other even numbers are not primes
    if not n & 1:
        return False
    # range starts with 3 and only needs to go up the square root of n
    # for all odd numbers
    for x in range(3, int(n**0.5)+1, 2):
        if n % x == 0:
            return False
    return True

# Create an RDD of numbers from 0 to 1,000,000
nums = sc.parallelize(xrange(1000000))

# Compute the number of primes in the RDD
print nums.filter(isprime).count()

A: I often use the anaconda distribution with PySpark as well and find it useful to set the PYSPARK_PYTHON variable, pointing to the python binary within the anaconda distribution. I've found that otherwise I get lots of strange errors. You might be able to check which python is being used by running rdd.map(lambda x: sys.executable).distinct().collect(). I suspect it's not pointing to the correct location.

In any case, I recommend wrapping the configuration of your path and environment variables in a script. I use the following.

def configure_spark(spark_home=None, pyspark_python=None):
    spark_home = spark_home or "/path/to/default/spark/home"
    os.environ['SPARK_HOME'] = spark_home

    # Add the PySpark directories to the Python path:
    sys.path.insert(1, os.path.join(spark_home, 'python'))
    sys.path.insert(1, os.path.join(spark_home, 'python', 'pyspark'))
    sys.path.insert(1, os.path.join(spark_home, 'python', 'build'))

    # If PySpark isn't specified, use currently running Python binary:
    pyspark_python = pyspark_python or sys.executable
    os.environ['PYSPARK_PYTHON'] = pyspark_python

When you point to your anaconda binary, you should also be able to import all the packages installed in its site-packages directory. This technique should work for conda environments as well.
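Not from the original answer, but here is a small usage sketch of the configure_spark helper above; the Spark home, anaconda path and master URL are placeholders you would replace with your own.

import sys

configure_spark(spark_home="/opt/spark",                     # assumed install location
                pyspark_python="/opt/anaconda/bin/python")   # assumed anaconda python

from pyspark import SparkContext   # works only after sys.path has been patched above

sc = SparkContext("spark://master:7077", "env-check")        # assumed master URL
# Confirm which interpreter the executors actually run:
print(sc.parallelize(range(4)).map(lambda _: sys.executable).distinct().collect())

If the printed path is not the anaconda binary on every worker, the workers will not see anaconda's site-packages, which matches the symptoms described in the question.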
doc_573
linker command failed with exit code 1 (use -v to see invocation)

Native linking failed for '/Users/{name}/Desktop/{application name}/{application name}/obj/iPhone/Debug/device-builds/iphone8.1-11.1.2/mtouch-cache/arm64/libSomething.dll.dylib'. Please file a bug report at http://bugzilla.xamarin.com

I get these errors with a dll generated from a binding of a (native Obj-C) static library built with Xcode 9.2. I'm using Visual Studio 7.2 with Xamarin.iOS 11.6.1.2.

A: I fixed it. The error was in the static library, which was missing two classes. I've added these two in Xcode and then it works.
doc_574
Labels change every few minutes, but the location coordinates change every few seconds. I'm using renderUI on server side and uiOutput on UI side. Looking forward for some help, thanks. library("httr") library("jsonlite") library("shiny") library("leaflet") library("dplyr") ui <- shinyUI(fluidPage( navbarPage("Title", tabPanel("MAP", leafletOutput("mymap", width = "auto", height = "560px") ) ), uiOutput("loc") ) ) server <- shinyServer(function(input, output) { autoInvalidate <- reactiveTimer(5000) reData <- eventReactive(autoInvalidate(), { # # example data # lat <- c(20.51,20.52,20.65) # long <- c(10.33,13.43,23.54) # labels <- c('John','Peter','Jolie') # data <- data.frame(lat, long, labels) # API call #1 response get_data <- GET(call1) get_data_text <- content(get_data, "text") get_data_json <- fromJSON(get_data_text, flatten = TRUE) data <- get_data_json$result # handling empty API response while(class(data) == "list"){ Sys.sleep(1) get_trams <- GET(call1) get_data_text <- content(get_data, "text") get_data_json <- fromJSON(get_data_text, flatten = TRUE) data <- get_data_json$result } # saving data before filtering - purpose of getting labels for the drop-down list and # creating a sorted list for selectInput function list_of_vals <- data uniq_first_lines <- c("all", unique(as.character(sort(as.numeric(list_of_vals$FirstLine))))) sorted_factor <- factor(uniq_first_lines, levels=uniq_first_lines) my_new_list <- split(uniq_first_lines, sorted_factor) # filter data if(input$loc != "all") { data <- data %>% filter_at( vars(one_of("FirstLine")), any_vars(.==input$loc)) } rownames(data) <- NULL return(list(data=data, my_new_list=my_new_list)) }, ignoreNULL = FALSE) output$loc <-renderUI({ selectInput("loc", label = h4("Choose location"), choices = reData()$my_new_list ,selected = "all" ) }) points <- eventReactive(autoInvalidate(), { cbind(reData()$trams_data$Lon, reData()$trams_data$Lat) },ignoreNULL = FALSE) labels <- eventReactive(autoInvalidate(), { paste("line: ", reData()$trams_data$FirstLine) },ignoreNULL = FALSE) output$mymap <- renderLeaflet({ leaflet() %>% addTiles() }) observeEvent(autoInvalidate(), { leafletProxy("mymap") %>% clearMarkers() %>% addMarkers( data = points(), label = labels() ) },ignoreNULL = FALSE) }) shinyApp(ui, server) A: I am going with a minimal example and with the small dataset you provided. You can adapt my example for your needs but I want to show you the use of a reactive dataset so you can filter by labels. Are you looking for something like that: library("httr") library("jsonlite") library("shiny") library("leaflet") library("dplyr") # # example data lat <- c(20.51,20.52,20.65) long <- c(10.33,13.43,23.54) labels <- c('John','Peter','Jolie') data <- data.frame(lat, long, labels) ui <- shinyUI(fluidPage( navbarPage("Title", tabPanel("MAP", leafletOutput("mymap", width = "auto", height = "560px") ) ), uiOutput("labels") ) ) server <- shinyServer(function(input, output) { output$labels <- renderUI({ selectInput("labels", label = h4("Choose label"), choices = c("John", "Peter", "Jolie") ,selected = "John") }) reData <- reactive({ autoInvalidate <- reactiveTimer(5000) data <- data %>% dplyr::filter(input$labels == labels) }) output$mymap <- renderLeaflet({ leaflet(reData()) %>% setView(10, 20, zoom = 5) %>% addTiles() %>% addMarkers() }) # observeEvent(autoInvalidate(), { # leafletProxy("mymap") %>% # clearMarkers() %>% # addMarkers( # data = points(), # label = labels() # ) # },ignoreNULL = FALSE) }) shinyApp(ui, server)
doc_575
{
    int i;
    char *names[5] = {"Miri", "Tali", "Ronit", "Avigail", "Shlomit"};

    //Printing all the names:
    for (i=0; i<5; i++)
        printf("%s\n" , names[i]);
    return 0;
}

How come it prints the whole name? Shouldn't names[0] (for example) print only M?

A: names is an array of character pointers. So names[0] is a char * pointing to "Miri". And similarly for the subsequent items.

A: In your program names is an array of pointers to char, as was already mentioned in Rohan's answer, so to print the first character you should first access the array element and then the 0th character:

printf("%c\n", names[i][0]);

Also note that the appropriate specifier is "%c" for one character, since "%s" expects a pointer to char, which should point to a null-terminated sequence of bytes, i.e. a string.

Additionally, you should declare the array like

const char *names[5] = {"Miri", "Tali", "Ronit", "Avigail", "Shlomit"};

because the elements are string literals, and should not be modified. So using the const specifier you prevent accidentally doing that.
doc_576
Backtrace: * thread #1: tid = 0x848b34, 0x000000010eb3bc50 libswiftCore.dylib`function signature specialization <preserving fragile attribute, Arg[2] = Dead, Arg[3] = Dead> of Swift._fatalErrorMessage (Swift.StaticString, Swift.StaticString, Swift.StaticString, Swift.UInt, flags : Swift.UInt32) -> Swift.Never + 96, queue = 'com.apple.main-thread', stop reason = EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP, subcode=0x0) frame #0: 0x000000010eb3bc50 libswiftCore.dylib`function signature specialization <preserving fragile attribute, Arg[2] = Dead, Arg[3] = Dead> of Swift._fatalErrorMessage (Swift.StaticString, Swift.StaticString, Swift.StaticString, Swift.UInt, flags : Swift.UInt32) -> Swift.Never + 96 * frame #1: 0x000000010c19c231 Exercise Generator`createNew.viewDidLoad(self=0x00007f9d12d69b30) -> () + 177 at createNew.swift:43 frame #2: 0x000000010c19c722 Exercise Generator`@objc createNew.viewDidLoad() -> () + 34 at createNew.swift:0 frame #3: 0x000000010d32ea3d UIKit`-[UIViewController loadViewIfRequired] + 1258 frame #4: 0x000000010d32ee70 UIKit`-[UIViewController view] + 27 frame #5: 0x000000010dbef6a4 UIKit`-[_UIFullscreenPresentationController _setPresentedViewController:] + 87 frame #6: 0x000000010d309702 UIKit`-[UIPresentationController initWithPresentedViewController:presentingViewController:] + 141 frame #7: 0x000000010d341e97 UIKit`-[UIViewController _presentViewController:withAnimationController:completion:] + 3956 frame #8: 0x000000010d34526b UIKit`-[UIViewController _performCoordinatedPresentOrDismiss:animated:] + 530 frame #9: 0x000000010d344d51 UIKit`-[UIViewController presentViewController:animated:completion:] + 179 frame #10: 0x000000010c1b18fa Exercise Generator`openRecent.myaction(sender=0x00007f9d12c45f50, self=0x00007f9d12c451e0) -> () + 1706 at openRecent.swift:57 frame #11: 0x000000010c1b1afa Exercise Generator`@objc openRecent.myaction(sender : UIButton!) 
-> () + 58 at openRecent.swift:0 frame #12: 0x000000010d18e8bc UIKit`-[UIApplication sendAction:to:from:forEvent:] + 83 frame #13: 0x000000010d314c38 UIKit`-[UIControl sendAction:to:forEvent:] + 67 frame #14: 0x000000010d314f51 UIKit`-[UIControl _sendActionsForEvents:withEvent:] + 444 frame #15: 0x000000010d313e4d UIKit`-[UIControl touchesEnded:withEvent:] + 668 frame #16: 0x000000010d6be304 UIKit`_UIGestureEnvironmentSortAndSendDelayedTouches + 5645 frame #17: 0x000000010d6b8fcb UIKit`_UIGestureEnvironmentUpdate + 1472 frame #18: 0x000000010d6b89c3 UIKit`-[UIGestureEnvironment _deliverEvent:toGestureRecognizers:usingBlock:] + 521 frame #19: 0x000000010d6b7ba6 UIKit`-[UIGestureEnvironment _updateGesturesForEvent:window:] + 286 frame #20: 0x000000010d1fdc1d UIKit`-[UIWindow sendEvent:] + 3989 frame #21: 0x000000010d1aa9ab UIKit`-[UIApplication sendEvent:] + 371 frame #22: 0x000000010d99772d UIKit`__dispatchPreprocessedEventFromEventQueue + 3248 frame #23: 0x000000010d990463 UIKit`__handleEventQueue + 4879 frame #24: 0x000000010cd0d761 CoreFoundation`__CFRUNLOOP_IS_CALLING_OUT_TO_A_SOURCE0_PERFORM_FUNCTION__ + 17 frame #25: 0x000000010ccf298c CoreFoundation`__CFRunLoopDoSources0 + 556 frame #26: 0x000000010ccf1e76 CoreFoundation`__CFRunLoopRun + 918 frame #27: 0x000000010ccf1884 CoreFoundation`CFRunLoopRunSpecific + 420 frame #28: 0x00000001113f1a6f GraphicsServices`GSEventRunModal + 161 frame #29: 0x000000010d18cc68 UIKit`UIApplicationMain + 159 frame #30: 0x000000010c1af2ef Exercise Generator`main + 111 at AppDelegate.swift:12 frame #31: 0x000000011046768d libdyld.dylib`start + 1 Relevant Code: Present method: func myaction(sender: UIButton!) { //several buttons are created programmatically, all trigger this function print("button pressed") print(sender.currentTitleColor) switch sender.tag { case 0 : print("First Button") case 1 : print("Second Button") default: print("button action from undefined button") } present(createNew(), animated: true, completion: nil) } Destination (createNew()) @IBOutlet weak var nameField: UITextField!{ didSet { nameField.delegate = self } } @IBOutlet weak var tagButton: UIButton! @IBOutlet var PatientPicker: UIPickerView! @IBOutlet var OrientationPicker: UIPickerView! @IBOutlet var tagLabel: UILabel! @IBOutlet weak var saveButton: UIButton! var patientPickerData : [String] = [String]() var orientationPickerData: [String] = [String]() var exerciseName: String = "Untitled" var orientation : String = String() var tag: String = "" override func viewDidLoad() { super.viewDidLoad() // Do any additional setup after loading the view. //Picker info self.PatientPicker.dataSource = self //Error happens here self.PatientPicker.delegate = self self.OrientationPicker.dataSource = self self.OrientationPicker.delegate = self patientPickerData = ["--Select Patient--", "No Patient"] orientationPickerData = ["--Select Orientation--","Standing","Reclining"] //End of viewDidLoad() } This should be all the code relevant to the problem. If needed, there is some more stuff I could post, but as I said earlier this works with storyboard segues and for some reason does not work when presented programmatically. A: this works with storyboard segues Yes, because when you do a segue you are using the view controller in the storyboard, where it has a view, and that view contains a picker view and all the outlets get hooked up. But your createNew creates a different view controller, one with an empty view containing no picker view or anything else. 
So your outlets are never hooked up, they are nil, and you crash. You need to do what the segue does: pull out the view controller from the storyboard.
doc_577
A: You can use the string constructor:

Dim s As String = "1234"
s = New String(" "c, s.Length)

With a listbox, you might want to consider removing the item instead of setting it to spaces:

ListBox1.Items.RemoveAt(i)

Here's one way to remove an arbitrary word from a string s:

sRemove = "abc"
i = s.IndexOf(sRemove)
If i >= 0 Then s = s.Substring(0, i) & New String(" "c, sRemove.Length) & s.Substring(i + sRemove.Length)
doc_578
Here is my method : public sendPaginationMessage = async ( message: Message, pages: MessageEmbed[], emojiList: (GuildEmoji | string)[] = ['⏪', '⏩'], timeout = 120000 ) => { const channelId = message?.channelId if (!message || !channelId) { console.error('Channel is inaccessible.') return } if (!pages) { console.error('Pages are not given.') return } if (emojiList.length !== 2) { console.error('Need two emojis.') return } pages[0].footer = {text: `Page 1 / ${pages.length}`} const channel = await this.getChannelById(channelId) as TextChannel if (!channel) { return console.error( `Channel ${channelId} introuvable dans: `, JSON.stringify(this.discordClient.channels.cache, null, 4) ) } const currentPage: Message = await channel.send({embeds: [pages[0]]} as MessageOptions) for (const emoji of emojiList) await currentPage.react(emoji) const filter = (reaction: MessageReaction, user: User) => { const emojiName = reaction.emoji.name return !!emojiName && message.author.id === user.id && emojiList.includes(emojiName) && !user.bot } let page = 0 const collector = currentPage.createReactionCollector({filter, time: timeout}) collector.on('collect', reaction => { reaction.users.remove(message.author) // Log 2 times ¯\_(ツ)_/¯ console.log('reaction', reaction) const emojiName = reaction.emoji.name const previous = emojiName === emojiList[0] if (previous && page <= 0 || !previous && page + 1 >= pages.length) return page = previous ? --page : ++page pages[page].footer = {text: `Page ${page + 1} / ${pages.length}`} currentPage.edit({embeds: [pages[page]]} as MessageOptions) }) collector.on('end', () => { currentPage.reactions.removeAll().catch() }) return currentPage } A: I did the check by myself and I only get one message per reaction. I think you may have two instances of your bot running at the same time on your machine. Try to restart your computer/VPS to see if it fixes it.
doc_579
{ path: ':word', redirectTo: 'blah/:word', pathMatch: 'full' }

When I run this with a parameter, like localhost:4200/test#fragment, Angular redirects it to localhost:4200/blah/test without the fragment.

I have managed to bypass this by writing a resolver where I redirect to the right place with the fragment, but it feels stupid because then I need to put a dummy component into the router like this:

{
  path: ':word',
  resolve: { _: RedirectResolver },
  component: DummyComponent, // We never go here
}

If I omit the component, then Angular won't work. Are there any good or native ways to accomplish this kind of behavior?

A: You can set "preserveFragment" to true (doc: https://angular.io/api/router/NavigationExtras)
doc_580
from binance.client import Client
from telebot import types
from selenium import webdriver
from bs4 import BeautifulSoup as BS
from requests_html import HTMLSession
import requests
import json
import pprint
import pandas as pd

client = Client(api_key, secret_key)

r = requests.get("https://p2p.binance.com/en/trade/TinkoffNew/USDT?fiat=RUB").text
html = BS(r, 'html.parser')
item = html.find('div', {'class': 'css-1m1f8hn'}).get_text()

---

'NoneType' object has no attribute 'get_text'

I tried both of these:

item = html.find('div', {'class': 'css-1m1f8hn'}).get_text()
# or
item = html.find('div', {'class': 'css-1m1f8hn'}).text

but I always get the same error: 'NoneType' object has no attribute 'get_text'
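The traceback itself is not about the attribute you expect: BeautifulSoup's find() returns None when no element matches, and calling get_text() on None raises exactly this AttributeError. A defensive sketch (not part of the original post) that makes the failure visible instead of crashing:

import requests
from bs4 import BeautifulSoup as BS

r = requests.get("https://p2p.binance.com/en/trade/TinkoffNew/USDT?fiat=RUB").text
html = BS(r, "html.parser")

node = html.find("div", {"class": "css-1m1f8hn"})
if node is None:
    # The class is not in the HTML that requests received; the prices are most
    # likely injected by JavaScript, so a plain GET never sees that element.
    print("element not found in raw HTML")
else:
    print(node.get_text())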
doc_581
and String abc = "Hello"; // will not create a new instance

When the string is created as a literal, the value "Hello" will be in the string constant pool and the reference will be on the stack. What about the String object? Where does its memory allocation happen? What does the intern() method do? Which is more efficient?
doc_582
* *getaddrinfo(3) failed for www.wp.pl#:80 *Couldn't resolve host 'www.wp.pl#' And the program terminates. And that is the problem, because I it to go futher and connect to other pages. I just won't parse and analyze the broken page I don't want to end my program after come across such one. I posted similar topic, but it contained only snippets of code so it was diffcult for you to say what is wrong, now I will post whole code. int PageHandler::getCleanAndRepairedPage(string pageURL, string& outDoc) throw (exception) { if (find(visitedPages.begin(), visitedPages.end(), pageURL) != visitedPages.end()) { // url has already been visited outDoc = ""; return VISITED_PAGE_ERROR; } else { // new url visitedPages.push_back(pageURL); char *charURL; charURL = const_cast<char*> (pageURL.c_str()); CURL *curl; char curl_errbuf[CURL_ERROR_SIZE]; TidyBuffer output = { 0 }; TidyBuffer errbuf = { 0 }; TidyBuffer docbuf = { 0 }; TidyDoc tdoc = tidyCreate(); // Initialize "document" tidyOptSetBool(tdoc, TidyForceOutput, yes); tidyOptSetInt(tdoc, TidyWrapLen, 4096); tidyBufInit(&docbuf); int rc = -1; Bool ok; curl = curl_easy_init(); curl_easy_setopt( curl, CURLOPT_URL, charURL ); curl_easy_setopt( curl, CURLOPT_ERRORBUFFER, curl_errbuf ); curl_easy_setopt( curl, CURLOPT_WRITEFUNCTION, write_cb ); curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L); curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L); curl_easy_setopt( curl , CURLOPT_WRITEDATA, &docbuf ); int err; err = curl_easy_perform(curl); cout << "curl_easy_perfom return value = " << err << endl; if (!err) { ok = tidyOptSetBool(tdoc, TidyXhtmlOut, yes); // Convert to XHTML if (ok) { rc = tidySetErrorBuffer(tdoc, &errbuf); // Capture diagnostics if (rc >= 0) { rc = tidyParseBuffer(tdoc, &docbuf); // parse the buffer } if (rc >= 0) { rc = tidyCleanAndRepair(tdoc); // Tidy it up! } if (rc >= 0) { rc = tidyRunDiagnostics(tdoc); // Kvetch } if (rc > 1) { // If error, force output. rc = (tidyOptSetBool(tdoc, TidyForceOutput, yes) ? rc : -1); } if (rc >= 0) { rc = tidySaveBuffer(tdoc, &output); // Pretty Print } } if (rc >= 0) { if (rc > 0) { //printf("\nDiagnostics:\n\n%s", errbuf.bp); } } else { printf("A severe error (%d) occurred.\n", rc); } } else { printf("%s\n", curl_errbuf); } if (err == NO_ERROR) { string tmp(reinterpret_cast<char const*> (output.bp)); outDoc = tmp; } else { outDoc = ""; } curl_easy_cleanup(curl); tidyBufFree(&docbuf); tidyBufFree(&output); tidyBufFree(&errbuf); tidyRelease(tdoc); return err; // err == 0 <=> everything ok! } } And the console output : EDIT : I forgot to say what is going on on console output. I connect to many pages and the invalid URL is just one of them. At the beginning you can see a succesful connection and cURL messages about it, the result of curl_easy_perform equals 0, which indicate that everything went good. Next message is about connection to the invalid URL, as u can see return value of curl_easy_perform is 6, which is not good. A: "www.wp.pl#" is not a legal URL and it causes libcurl to (mistakenly) use the entire string as a host name, and you don't have any such and the operation fails.
doc_583
http://angular-tips.com/blog/2013/08/watch-how-the-apply-runs-a-digest/

directive link function:

element.bind('click', function() {
  scope.foo++;
  scope.bar++;
  scope.$apply();
});

a better way of using $apply:

element.bind('click', function() {
  scope.$apply(function(){
    scope.foo++;
    scope.bar++;
  });
});

What's the difference? The difference is that in the first version, we are updating the values outside the Angular context so if that throws an error, Angular will never know. Obviously in this tiny toy example it won't make much difference, but imagine that we have an alert box to show errors to our users and we have a 3rd party library that does a network call and it fails. If we don't wrap it inside an $apply, Angular will never know about the failure and the alert box won't be there.

Confusion: Why does Angular need to know about the error? I just need to show it to the users. For example, if there is an Ajax request in the link fn of a directive, I just need to tell the user what happened if it fails.

A: The Angular $scope has a function called $apply() which takes a function as an argument. AngularJS says that it will know about a model mutation only if that mutation is done inside $apply(). So you simply need to put the code that changes models inside a function and call $scope.$apply(), passing that function as an argument.

After the $apply() function call ends, AngularJS knows that some model changes might have occurred. It then starts a digest cycle by calling another function, $rootScope.$digest(), which propagates to all child scopes. In the digest cycle, watchers are called to check if the model value has changed. If a value has changed, the corresponding listener function then gets called. Now it's up to the listener how it handles the model changes.

For an Ajax call made through Angular's built-in $http, the model mutation code is implicitly wrapped within an $apply() call, so you don't need any additional steps.
doc_584
web
    server.py   ## flask server program
    app
        static
            app.js
            controllers.js
            etc...
        templates
            index.html
            home.html

index.html

<!-- this didn't work -->
<ng-include src="templates/home.html"></ng-include>
<!-- nor did this -->
<ng-include src="home.html"></ng-include>

home.html

<h1> home! </h1>

Except I don't see the partial (home.html) in my output. Anyone see my mistake?

A: The src attribute of ng-include expects a string. Either you pass a scope variable, or pass a string directly:

<ng-include src=" 'templates/home.html' "></ng-include>
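One extra thing worth checking: ng-include fetches the partial over HTTP, so the URL in src must actually be served by the Flask app (by default Flask only serves /static plus whatever routes you define). The original server.py is not shown, so the following is only a hypothetical sketch of one way to expose the partials; the route and paths are assumptions.

from flask import Flask, render_template, send_from_directory

app = Flask(__name__)

@app.route("/")
def index():
    return render_template("index.html")

@app.route("/templates/<path:name>")
def partial(name):
    # Serve partials as plain files so Jinja does not interfere with Angular markup.
    return send_from_directory("templates", name)

if __name__ == "__main__":
    app.run(debug=True)

With a route like this (or with the partials moved under static/), the quoted src from the answer, 'templates/home.html', resolves to a URL the browser can actually load.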
doc_585
i get the following result: {"flag":"failed","msg":"insert event","event id":"89","invitedusers":"[0508690186, 0508690187]","size_invited":1,"user_query":"SELECT id From users WHERE "} and i would like to know how can i read the values in PHP under "invitedusers":"[0508690186, 0508690187]" this is my php code: <?php /** * Created by PhpStorm. * User: matant * Date: 9/17/2015 * Time: 2:56 PM */ include 'response_process.php'; class CreateEvent implements ResponseProcess { public function dataProcess($dblink) { $output = array(); $sport = $_POST["sport_type"]; $date = date("Y-m-d",strtotime(str_replace('/','-',$_POST["date"]))); $s_time =$_POST["s_time"]; $e_time = $_POST["e_time"]; $lon = $_POST["lon"]; $lat = $_POST["lat"]; $event_type = $_POST["event_type"]; $max_p = $_POST["max_participants"]; $sched = $_POST["scheduled"]; $gen = $_POST["gender"]; $min_age = $_POST["minAge"]; $query = "SELECT * FROM event WHERE (event.longtitude = '$lon' AND event.latitude = '$lat') AND event.event_date = '$date' And ((event.start_time BETWEEN '$s_time' AND '$e_time') OR (event.end_time BETWEEN '$s_time' AND '$e_time'))"; //AND (event.start_time = '$s_time' AND event.end_time = '$e_time') //check time and place of the event $result_q = mysqli_query($dblink,$query) or die (mysqli_error($dblink)); if(!$result_q) { $output["flag"]= "select failed"; $output["msg"] = $result_q; return json_encode($output); } //case date and time are available else { $no_of_rows = mysqli_num_rows($result_q); if ($no_of_rows < 1) { $output["flag"] = "success"; $output["msg"] = "insert event"; $result = mysqli_query($dblink, "INSERT into event(kind_of_sport,event_date,start_time,end_time,longtitude,latitude,private,gender,min_age,max_participants,scheduled,event_status) VALUES ('$sport','$date','$s_time','$e_time','$lon','$lat','$event_type','$gen','$min_age','$max_p','$sched','1')") or die (mysqli_error($dblink)); if (!$result) { $output["flag"] = "failed to create event"; // return (json_encode($output)); } if(isset($_POST["invitedUsers"])){ $query_id = "SELECT id From event WHERE event.event_date = '$date' and event.start_time = '$s_time' and event.end_time = '$e_time'"; $event_s_res = mysqli_query($dblink,$query_id) or die (mysqli_error($dblink)); if(!$event_s_res) { $output["flag"] = "failed"; $output["msg"] = "Event id not found"; } else{ $row = mysqli_fetch_assoc($event_s_res); $output["event id"]=$row["id"]; $json = json_decode($_POST["invitedUsers"]); $invited_users = str_replace("\\","",$json); $output["invitedusers"] = $_POST["invitedUsers"] ; $output["size_invited"] = count($_POST["invitedUsers"]); $query_users = "SELECT id From users WHERE "; $i=0; foreach($invited_users as $user) { if ($i < (count($invited_users) - 1)) // add a space at end of this string $query_users .= "users.mobile = '".$user[$i]."' or "; else { // and this one too $query_users .= "users.mobile = '".$user[$i]."' "; $output["users"][] = $user['mobile']; } $i++; $output["index"]=$i; } $output["user_query"]= $query_users; /* $event_user_s_res = mysqli_query($dblink,$query_users) or die (mysqli_error($dblink)); if(!$event_user_s_res) { $output["flag"] = "failed"; $output["msg"] = "user id not found"; }*/ } $output["flag"] = "failed"; } } else { $output["flag"] = "failed"; $output["msg"] = "Place is already occupied in this time"; } } return json_encode($output); } } A: i resolve this issue by passing a JSON object from the application and using json_decode method which convert it back.
doc_586
I do have something like this:

var begin = $("#begin_date").val();
var end = $("#end_date").val();
$("#table").html("<%= escape_javascript(render partial: 'my_partial', locals: { begin_date: begin_d, end_date: end_d}) %>")

So, I know that 'begin_d' and 'end_d' must be .erb in order to be loaded. Is there any way to pass JavaScript vars in order to render a partial with custom locals values? Or any other way to achieve the same? My intent is to refresh a table based on the dates (if they are changed).

EDIT: I found the answer. Phlip helped by pointing out that I should use Ajax. So now, I made an Ajax request to a controller that simply does:

respond_to do |format|
  format.html { render partial: 'my_partial', locals: { begin_date: params[:begin_date], end_date: params[:end_date] } }
end

A: To send JS variables into ERB, you gotta use Ajax. JS runs in the browser, and ERB runs on the server, so there's no way around looking up how to use $('#table').load() there.

Unless you could do with one of the zillion ways JS can cook its own new HTML, without ERB. The only problem then is you wouldn't be DRY, if you also needed to output that same HTML at page load time, from the server.
doc_587
I want to know if it is possible to write a plugin that helps with things like auto-completion or suggestions, like normal Java plugins do, or whether plugins can only be written for compiler/interpreter-based languages?

A: You can write editors for any sort of file. If the file is text-based, the Eclipse text editor APIs provide support for things like auto-completion. One of the examples available in the 'New / Plug-in project' wizard creates an XML editor with some of these features already included.
doc_588
I'm using prestashop 1.7.2.4 and I would like to add a link on the product pages that redirects the customer to the already existing contact form, which needs to be pre-filled with the name of the selected product. Do you know if there's a way to do that? Thanks in advance.

A: Your link should be like this:

http://your-shop.com/contact-us?message=name_of_your_product

A: In your TPL:

{$link->getPageLink('contact')}?message='YOUR MESSAGE'

In HTML:

WEBSITE_URL?message='YOUR MESSAGE'

Regards
doc_589
list-style-type: none;
padding: 0px 5px 0px 5px;
margin: 0px;
}

ul li {
border-bottom: 1px solid #B9D3EE;
}

ul li a:link, ul li a:visited, ul li a:active {
width: 100%;
color: blue;
}

ul li a: hover {
width: 100%;
color: #ffffff;
background-color: #B9D3EE;
}

In IE the above code will highlight the complete cell when hovered. But in FF it will only highlight the link that is within it. I would like FF to highlight the complete cell as IE does.

Here is the list. Keep in mind that only the first link has been created, because I have just started creating this list and stopped to test it when I noticed this problem.

<ul>
<li><a href="">beauty</a></li>
<li>creative</li>
<li>Info Tech. (IT)</li>
<li>cycle</li>
<li>event</li>
<li>financial</li>
<li>legal</li>
<li>lessons</li>
<li>medical</li>
<li>marine</li>
<li>pet</li>
<li>automotive</li>
<li>farm+garden</li>
<li>household</li>
<li>labor/move</li>
<li>MKT/COMM</li>
<li>office</li>
<li>skill'd trade</li>
<li>real estate</li>
<li>health/wellness</li>
<li>travel/vac</li>
<li>write/ed/tr8</li>
</ul>

Any help is much appreciated!

A: You can make your a elements block elements, so they will get the full width of their parent elements (demo: http://jsfiddle.net/WasWE/).

ul li a:link, ul li a:visited, ul li a:active {
    display: block;
    color: blue;
}
ul li a:hover {
    background-color: #B9D3EE;
    color: #ffffff;
}

Or you can add the hover state to the li elements (demo: http://jsfiddle.net/XmwTV/):

ul li:hover {
    background-color: #B9D3EE;
}
ul li a:link, ul li a:visited, ul li a:active {
    color: blue;
}
ul li a:hover {
    color: #ffffff;
}

A: Hi, now remove width: 100% from your anchor link CSS and define display: block on the anchor, like this:

ul li a:link, ul li a:visited, ul li a:active {
    display: block;  /* add this line */
    width: 100%;     /* remove this line */
    color: blue;
}
ul li a:hover {
    width: 100%;     /* remove this line */
    color: #ffffff;
    background-color: #B9D3EE;
}

Demo
doc_590
All the examples on the scalatra page is with squeryl etc, but I dont want orm and dsl. Do anyone have a ok examples with scalatra and c3p0. Thanks all :) A: In addition to Steve's response, you can use a scala object for the collectionPoolDataSource instead of getting it from the request context. For example, declaring: object DBDataSource { private val ds = new ComboPooledDataSource ds.setDriverClass("org.mariadb.jdbc.Driver") ds.setUser(dbUser) ds.setPassword(dbPassword) ds.setDebugUnreturnedConnectionStackTraces(true) ds.setUnreturnedConnectionTimeout(7200) ds.setMaxPoolSize(100) ds.setMaxStatements(0) ds.setCheckoutTimeout(60000) ds.setMinPoolSize(5) ds.setTestConnectionOnCheckin(true) ds.setTestConnectionOnCheckout(false) ds.setBreakAfterAcquireFailure(false) ds.setIdleConnectionTestPeriod(50) ds.setMaxIdleTimeExcessConnections(240) ds.setAcquireIncrement(1) ds.setAcquireRetryAttempts(5) ds.setJdbcUrl(dbUrl) ds.setPreferredTestQuery("SELECT 1") def datasource = ds } and you can access to datasource without needing the request context: def withConnection[T](op: (Connection) => T): T = { var con: Connection = null try { con = DBDataSource.datasource.getConnection() op(con) } finally { attemptClose(con) } } A: Note: None of the code below has been compiled or checked, i'm just writing it into my browser. apologies for the inevitable glitches. So, I've never used Scalatra. But I wrote c3p0, and have used the Servlets API a lot. A quick look at scalatra's guides suggests something like this would work: import org.scalatra._ import com.mchange.v2.c3p0._ import javax.sql.DataSource import javax.servlet.ServletContext class ScalatraBootstrap extends LifeCycle { override def init(context: ServletContext) { val cpds = new ConnectionPoolDataSource(); // perform any c3p0 config operations you might // want here, or better yet, externalize all of // that into a c3p0.properties, c3p0-config.xml, // or (c3p0 version 0.9.5 only) application.conf context.setAttribute( "appDataSource", cpds ); } override def destroy(context: ServletContext) { val cpds = context.getAttribute( "appDataSource" ); if ( cpds != null ) { try { cpds.close() } catch { case e : Exception => e.printStackTrace(); //consider better logging than this } } } } To get access to the DataSource from a ServletRequest object, you'd call... request.getServletContext().getAttribute( "appDataSource" ).asInstanceOf[DataSource] You might want to use your Scala-fu to pimp ServletRequest and make access to the Connection pool easier and prettier. For example, you could write... implicit class ConnectionPoolRequest( request : ServletRequest ) { def connectionPool : DataSource = request.getServletContext().getAttribute( "appDataSource" ).asInstanceOf[DataSource] } Put this in a package object or some object you import into your code, and while you handle requests you should be able to write stuff like... val conn = request.connectionPool.getConnection(); // do stuff conn.close() However, the above is crappy, leak-prone code, because the close() isn't in a finally and will be skipped by an Exception. In Java7-style, you'd use try-with-resources to avoid this. 
In Scala, the naive way is to do this: var conn = null; try { conn = request.connectionPool.getConnection(); // do stuff } finally { try { if ( conn != null ) conn.close() } catch { case e : Exception => e.printStackTrace() // better logging would be nice } } However, a better way in Scala is to define utility methods like this: def withConnection[T]( ds : DataSource )( op : (Connection) => T) : T = { var con : Connection = null; try { con = ds.getConnection(); op(con); } finally { attemptClose( con ); } } def attemptClose( con : Connection ) { if ( con != null ) { try { if ( conn != null ) conn.close() } catch { case e : Exception => e.printStackTrace() // better logging would be nice } } } Then you can just write stuff like... withConnection( request.connectionPool ) { conn => // do stuff with the Connection // don't worry about cleaning up, that's taken care of for you } To really keep JDBC clean in scala, consider writing analogous methods like withStatement and withResultSet as well, so you can do withConnection( request.connectionPool ) { conn => withStatement( conn ) { stmt => withResultSet( stmt.executeQuery("SELECT * FROM spacemen") ) { rs => // read stuff from ResultSet } } }
doc_591
Emulator opening for marshmallow..... Please suggest on this.
doc_592
@interface CheckInController () @property (nonatomic, assign) int checkInDate; @end the code of button's selector - (void)checkin { self.checkInDate++; NSLog(@"checkInDate: %d",_checkInDate); } whatever times I click the button,the console panel shows like this 2017-08-01 16:46:39.631 HeJing[1888:64607] checkInDate: 0 2017-08-01 16:46:40.057 HeJing[1888:64607] checkInDate: 0 2017-08-01 16:46:40.342 HeJing[1888:64607] checkInDate: 0 2017-08-01 16:46:40.578 HeJing[1888:64607] checkInDate: 0 after that i assign some int value like this self.checkInDate = 1; NSLog(@"checkInDate: %d",_checkInDate); the console panel always show 2017-08-01 17:06:38.182 HeJing[1991:75284] checkInDate: 0 2017-08-01 17:06:39.101 HeJing[1991:75284] checkInDate: 0 2017-08-01 17:06:39.255 HeJing[1991:75284] checkInDate: 0 2017-08-01 17:06:39.401 HeJing[1991:75284] checkInDate: 0 did I do something wrong? the code above all run in the .m file. and my setter method - (void)setCheckInDate:(int)checkInDate { NSString *infoString = [NSString stringWithFormat:@"已连续签到%d天,再坚持%d天就可以积分翻倍哦!",_checkInDate,6 - _checkInDate]; NSMutableAttributedString *attributedString = [[NSMutableAttributedString alloc] initWithString:infoString]; [attributedString addAttribute:NSForegroundColorAttributeName value:[UIColor colorWithRed:0.97 green:0.44 blue:0.13 alpha:1.00] range:NSMakeRange(5, 2)]; [attributedString addAttribute:NSForegroundColorAttributeName value:[UIColor colorWithRed:0.97 green:0.44 blue:0.13 alpha:1.00] range:NSMakeRange(11, 2)]; _checkInInfoLabel.attributedText = attributedString; } A: You are using a custom setter by overriding - (void)setCheckInDate:(int)checkInDate. So when you write self.checkInDate = 1;, it's calling the custom setter. But in that setter you didn't write _checkInDate = checkInDate. Adding this should fix your issue.
doc_593
Integral(1/(x^4 + y^4) dx dy) where y >= x^2 + 1

A: How about

Integrate[1/(x^4 + y^4), y, x, Assumptions -> y >= x^2 + 1]

Note also Multiple Integral.

A: I assume you mean the definite integral over all x, with y > x^2 + 1. The syntax is this:

Integrate[1/(x^4 + y^4), {x, -Infinity, Infinity}, {y, x^2 + 1, Infinity}]

Note that Mathematica's ordering of the integration variables is the reverse of the standard convention, i.e. left to right is outside to inside. This takes quite a while to report that it does not converge. However, numerical integration gives a result:

NIntegrate[1/(x^4 + y^4), {x, -Infinity, Infinity}, {y, x^2 + 1, Infinity}]

0.389712

My guess is that the numeric result is correct and Mathematica is simply wrong about the analytic convergence. You might try math.stackexchange.com or mathematica.stackexchange.com if you need to prove convergence. I am doubtful there is a nice analytic result.
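As an extra sanity check on the numeric value (not part of the original answers): the integrand is even in x and the region y >= x^2 + 1 is symmetric about x = 0, so you could also evaluate

2 NIntegrate[1/(x^4 + y^4), {x, 0, Infinity}, {y, x^2 + 1, Infinity}]

which should agree with the 0.389712 reported above if everything is consistent.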
doc_594
private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {
    PreparedStatement ps = null;
    try {
        Class.forName("com.mysql.jdbc.Driver");
        Connection conn = DriverManager.getConnection("jdbc:mysql://localhoast:3306//a","root","root");
        Statement smt = conn.createStatement();
        ps = conn.prepareStatement("insert into aone values (?,?,?)");
        String n = name.getText();
        String a = age.getText();
        String r = roll.getText();
        ps.setString(1,n);
        ps.setString(2,a);
        ps.setString(3,r);
        int i = ps.executeUpdate();
        if (i>0) {
            JOptionPane.showMessageDialog(null, "data is saved");
        } else {
            JOptionPane.showMessageDialog(null, "error");
        }
    } catch(Exception e) {
    }
}

A: Print the stack trace in the catch block. The JVM might be throwing an exception, but you'll never know it this way. When you do that, I'm sure you'll be told that JDBC could not connect to "localhoast". I doubt that "localhoast" is correct; try "localhost".

There are so many things wrong with this code:

* You should not mingle UI and database code.
* You should not get a JDBC connection with every request; use a pool.
* The code is not layered, so it is hard to test.
* You don't close any JDBC resources in method scope.

Try it like this:

private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {
    Connection conn = null;
    PreparedStatement ps = null;
    try {
        Class.forName("com.mysql.jdbc.Driver");
        conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/a","root","root");
        ps = conn.prepareStatement("insert into aone values (?,?,?)");
        String n = name.getText();
        String a = age.getText();
        String r = roll.getText();
        ps.setString(1,n);
        ps.setString(2,a);
        ps.setString(3,r);
        int i = ps.executeUpdate();
        if (i > 0) {
            JOptionPane.showMessageDialog(null, "data is saved");
        } else {
            JOptionPane.showMessageDialog(null, "error");
        }
    } catch(Exception e) {
        e.printStackTrace();
    } finally {
        close(ps);   // You need to implement this
        close(conn); // You need to implement this
    }
}
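The close(...) helpers the answer leaves to the reader might look like this (just one possible sketch, using an overload per resource type and assuming java.sql.SQLException is imported; PreparedStatement is a Statement, so close(ps) resolves to the first overload):

private void close(Statement stmt) {
    // quietly close a Statement/PreparedStatement, logging any failure
    if (stmt != null) {
        try { stmt.close(); } catch (SQLException e) { e.printStackTrace(); }
    }
}

private void close(Connection conn) {
    // quietly close a Connection, logging any failure
    if (conn != null) {
        try { conn.close(); } catch (SQLException e) { e.printStackTrace(); }
    }
}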
doc_595
Example Laravel:

<a href="{!!URL::to('link1')!!}" class="{{Request::is('link1') ? 'activeMenu' : '' }}">Link1</a>

My ReactJS code:

<Nav className="navbar-logged">
  <NavItem>
    <NavLink className="nav-link-gdc" href="/">HOME</NavLink>
    <NavLink className="nav-link-gdc" href="#">LINK1</NavLink>
    <NavLink className="nav-link-gdc" href="#">LINK2</NavLink>
    <NavLink className="nav-link-gdc" href="#">LINK3</NavLink>
    <NavLink className="nav-link-gdc" href="#">LINK4</NavLink>
  </NavItem>
</Nav>

A: If you are using react-router, just pass your path in the to prop rather than in href, like this:

<NavLink className="nav-link-gdc" to="/">HOME</NavLink>

and it will add the class active automatically. See the docs for NavLink.

If not, and that NavLink is from reactstrap, then you will have to add some router logic to be able to mark a nav link as active, or, if you don't need changing URLs in the address bar, do something like this answer on SO.

A: Yes, we can control the class name of the NavLink:

const pathName = this.props.history.location.pathname;

<NavLink
  to='/pathNameHome'
  className='header__route--link'
  activeClassName={pathName === '/home' || pathName === '/dashboard' ? 'header__route--active' : ''}
>

In the above code, pathName is taken from the history location. If you want to put a condition on pathName, you can do that with {pathName === '/home' || pathName === '/dashboard' ? 'header__route--active' : ''}; in this block we compare the current path. If you want to apply it directly, you can write activeClassName='header__route--active'. So once the path is selected, the class name 'header__route--active' is activated; otherwise the class name 'header__route--link' is applied by default.
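Putting the first answer's suggestion back into the original markup, a sketch using react-router-dom v4/v5's NavLink could look like the following. The /link1 and /link2 paths are placeholders, the app is assumed to be wrapped in a Router, and note that reactstrap also exports a NavLink, so you may need to alias one of the two imports:

import { NavLink } from 'react-router-dom'; // alias if it clashes with reactstrap's NavLink

<Nav className="navbar-logged">
  <NavItem>
    {/* exact so "/" is only active on the home route */}
    <NavLink exact to="/" className="nav-link-gdc" activeClassName="activeMenu">HOME</NavLink>
    <NavLink to="/link1" className="nav-link-gdc" activeClassName="activeMenu">LINK1</NavLink>
    <NavLink to="/link2" className="nav-link-gdc" activeClassName="activeMenu">LINK2</NavLink>
  </NavItem>
</Nav>

Here activeClassName plays the same role as the ternary in the Laravel example: the class is applied only when the current location matches the link's to path.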
doc_596
Is there a simple way to set up Azure deployment and only give read-only access to a single repo using GitHub's read-only deploy keys?

A: When you set up deployment from GitHub, one part of the process is to pull the source (from the doc you refer to):

Azure creates an association with the selected repository, and pulls in the files from the specified branch.

But another part, and the most important one for the CI/CD process, is the creation of a new webhook on your repository. Setting up a webhook requires write permissions to the repository, hence the requirement for read/write access.

You can also read about the CD process in the Project Kudu wiki (note: Azure Web Apps runs on Project Kudu).
doc_597
https://del.dog/gilegnacay
doc_598
Here is how I register sw.js in Angular's main file:

platformBrowserDynamic()
  .bootstrapModule(AppModule)
  .then(registerServiceWorker)
  .catch(err => console.log(err));

function registerServiceWorker() {
  if ('serviceWorker' in navigator) {
    navigator.serviceWorker
      .register('sw.js')
      .then(reg => {
        log('Registration successful', reg);
        reg.onupdatefound = () => {
          const installingWorker = reg.installing;
          installingWorker.onstatechange = () => {
            switch (installingWorker.state) {
              case 'installed':
                if (navigator.serviceWorker.controller) {
                  log('New or updated content is available', installingWorker);
                } else {
                  log('Content is now available offline', installingWorker);
                }
                break;
              case 'redundant':
                console.error('The installing service worker became redundant', installingWorker);
                break;
              default:
                log(installingWorker.state);
                break;
            }
          };
        };
      })
      .catch(e => {
        console.error('Error during service worker registration:', e);
      });
  } else {
    console.warn('Service Worker is not supported');
  }
}

My generate-sw file that I run with npm:

const workboxBuild = require('workbox-build');

const SRC_DIR = 'src';
const BUILD_DIR = 'public';
const SW = 'sw.js';

const globPatterns = [
  "**/*.{ico,html,css,js,woff,json}"
];

const globIgnores = [
  "sw.js"
];

const input = {
  swSrc: `${SRC_DIR}/${SW}`,
  swDest: `${BUILD_DIR}/${SW}`,
  globDirectory: BUILD_DIR,
  globPatterns: globPatterns,
  globIgnores: globIgnores,
  maximumFileSizeToCacheInBytes: 4000000
};

workboxBuild.injectManifest(input).then(() => {
  console.log(`The service worker ${BUILD_DIR}/${SW} has been injected`);
});

And the base sw.js file:

importScripts('workbox-sw.prod.v2.1.2.js');

const workboxSW = new self.WorkboxSW({clientsClaim: true});
workboxSW.precache([]);
workboxSW.router.registerNavigationRoute('/index.html');

***** Update *****

Using Express for the development server, I set Cache-Control to zero, and now when I reload the page the service worker updates to the newer version. I am still confused about production: with Cache-Control set to days/years, how long does it take for the service worker to update, and will it clear the old cache and IndexedDB, or do we have to do that manually? Here is the code for Express:

app.use('/', express.static(publicFolderPath, {maxAge: 0}));

app.get('*', (req, res) => {
  res.sendFile('index.html');
});

A:

* The service worker won't change in the background while the site is open and a new version sits on the server; it will change when the site is opened again after the update has been deployed. Make sure that the browser isn't caching the script itself: its cache TTL/max-age should be set to 0.
* A new service worker won't clean the old service worker's caches out of the box; you need to do this housekeeping manually in the 'activate' event. E.g.:

self.addEventListener('activate', function(event) {
  event.waitUntil(
    caches.keys().then(function(cacheNames) {
      return Promise.all(
        cacheNames.filter(function(cacheName) {
          // Return true if you want to remove this cache,
          // but remember that caches are shared across
          // the whole origin
        }).map(function(cacheName) {
          return caches.delete(cacheName);
        })
      );
    })
  );
});

Refer to this link: https://developers.google.com/web/ilt/pwa/caching-files-with-service-worker

Hope this helps
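As a concrete illustration of the answer's first point, here is a minimal Express sketch that caches other static assets aggressively while always revalidating sw.js. The one-year max-age is an arbitrary example, and publicFolderPath is reused from the question's snippet (path.resolve is used because res.sendFile expects an absolute path):

const path = require('path');

// Register the sw.js route before the static middleware so it takes precedence
app.get('/sw.js', (req, res) => {
  res.set('Cache-Control', 'no-cache'); // browser revalidates the service worker script on each update check
  res.sendFile(path.resolve(publicFolderPath, 'sw.js'));
});

// Everything else can be cached for a long time, since precached files are revisioned
app.use('/', express.static(publicFolderPath, { maxAge: '365d' }));

With this split, deploying a new build changes sw.js, the browser picks it up on the next navigation, and the new precache manifest takes care of fetching the updated, revisioned assets.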
doc_599
A: After some help from the Prerender.io team, here are the steps that resulted in successful crawling by the Facebook and Google crawler tests. Remember this is for an AngularJS app running on a Parse.com backend.

1. Add $locationProvider.hashPrefix("!") to your .config in your main module (I am not using HTML5 mode because it causes issues when manually entering URLs).

2. Add prerender-parse to the TOP of your cloud/app.js and implement prerender-parse according to the instructions found here:

var express = require('express');
var app = express();
var parseAdaptor = require('cloud/prerender-parse.js');
app.use(require('cloud/prerenderio.js').setAdaptor(parseAdaptor(Parse)).set('prerenderToken','YOUR_PARSE_TOKEN'));

3. Add <meta name="fragment" content="!" /> to the <head> of your index.html.

Bonus - dynamic metadata from child controllers for crawlers:

B1. Add a controller with an event listener to your main app if you don't already have one:

<html lang="en" ng-app="roommi" ng-controller="MainCtrl">

.controller('MainCtrl', ['$rootScope', '$scope', '$state', '$stateParams',
  function($rootScope, $scope, $state, $stateParams) {
    $scope.$on('metaUpdate', function(event, metadata) {
      $scope.metadata = metadata;
    });
  }
]);

B2. In your child controller, set your metadata object and call $emit to send the event up to MainCtrl:

$scope.$emit('metaUpdate', metadata);

B3. Now you can add all of the metadata to the head of your index.html:

<meta property="og:url" content="{{metadata.url}}"/>
<meta property="og:title" content="{{metadata.title}}"/>
<meta property="og:image" content="{{metadata.image}}"/>
<meta property="og:description" content="{{metadata.desc}}"/>

B4. One caveat is that this method does not control the timing of the cache taken by Prerender.io, so only basic queries and data manipulation can be performed before the metadata object is populated. If someone figures out a good way to deal with the timing, let me know. I tried the window.prerenderReady method provided by Prerender.io, but it did not work in a few configurations I tried.

A: If I recall correctly, my three obstacles in making this work were:

a) Making $locationProvider.html5Mode(true) work
b) NOT using a hash prefix (e.g. "#", "#!")
c) Making nginx correctly parse the "escaped fragment".

I believe it's all covered quite well on Prerender's site. If memory serves, their founder also personally responds to emails and provides help.
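For anyone who wants to retry the window.prerenderReady approach mentioned in B4, the usual shape of it is sketched below, based on Prerender.io's documented flag; it is not something verified to work in this particular Parse/Angular setup, and query and buildMetadata are hypothetical placeholders:

<script>
  // In index.html, before your app scripts load: tell the Prerender service to wait
  window.prerenderReady = false;
</script>

Then, in the child controller, flip the flag only after the data the meta tags depend on has arrived:

query.find().then(function (results) {           // query: a hypothetical Parse.Query
  $scope.$emit('metaUpdate', buildMetadata(results)); // buildMetadata: a hypothetical helper
  window.prerenderReady = true;                  // now Prerender may snapshot the page
});

The idea is that the snapshot is delayed until the flag turns true, which addresses the timing caveat in B4, though as noted above it did not behave reliably in every configuration tried.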